Dataset schema (one row per source file):
hexsha: string (40) | size: int64 (3 - 1.03M) | ext: string (10 classes) | lang: string (1 class) |
max_stars_repo_path: string (3 - 972) | max_stars_repo_name: string (6 - 130) | max_stars_repo_head_hexsha: string (40 - 78) | max_stars_repo_licenses: list (1 - 10) | max_stars_count: int64 (1 - 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (24, nullable) | max_stars_repo_stars_event_max_datetime: string (24, nullable) |
max_issues_repo_path: string (3 - 972) | max_issues_repo_name: string (6 - 130) | max_issues_repo_head_hexsha: string (40 - 78) | max_issues_repo_licenses: list (1 - 10) | max_issues_count: int64 (1 - 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (24, nullable) | max_issues_repo_issues_event_max_datetime: string (24, nullable) |
max_forks_repo_path: string (3 - 972) | max_forks_repo_name: string (6 - 130) | max_forks_repo_head_hexsha: string (40 - 78) | max_forks_repo_licenses: list (1 - 10) | max_forks_count: int64 (1 - 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (24, nullable) | max_forks_repo_forks_event_max_datetime: string (24, nullable) |
content: string (3 - 1.03M) | avg_line_length: float64 (1.13 - 941k) | max_line_length: int64 (2 - 941k) | alphanum_fraction: float64 (0 - 1)
5ddcfb28fac7d3ac57a40dcf78450296b4aac5e8 | 13,643 | py | Python | evolveface/head/metrics.py | BillKerman/FaceNetCustomized | 30bb99b62f960034c4aa4206d7dc22de672a997f | ["MIT"] | 4 | 2020-08-30T13:31:34.000Z | 2020-12-31T08:18:27.000Z | evolveface/head/metrics.py | BillKerman/FaceNetCustomized | 30bb99b62f960034c4aa4206d7dc22de672a997f | ["MIT"] | null | null | null | evolveface/head/metrics.py | BillKerman/FaceNetCustomized | 30bb99b62f960034c4aa4206d7dc22de672a997f | ["MIT"] | null | null | null |
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
import math
# Support: ['Softmax', 'ArcFace', 'CosFace', 'SphereFace', 'Am_softmax']
class Softmax(nn.Module):
r"""Implement of Softmax (normal classification head):
Args:
in_features: size of each input sample
out_features: size of each output sample
device_id: the ID of GPU where the model will be trained by model parallel.
if device_id=None, it will be trained on CPU without model parallel.
"""
def __init__(self, in_features, out_features, device_id):
super(Softmax, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.device_id = device_id
self.weight = Parameter(torch.FloatTensor(out_features, in_features))
self.bias = Parameter(torch.FloatTensor(out_features))
nn.init.xavier_uniform_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
if self.device_id == None:
out = F.linear(x, self.weight, self.bias)
else:
sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
sub_biases = torch.chunk(self.bias, len(self.device_id), dim=0)
temp_x = x.cuda(self.device_id[0])
weight = sub_weights[0].cuda(self.device_id[0])
bias = sub_biases[0].cuda(self.device_id[0])
out = F.linear(temp_x, weight, bias)
for i in range(1, len(self.device_id)):
temp_x = x.cuda(self.device_id[i])
weight = sub_weights[i].cuda(self.device_id[i])
bias = sub_biases[i].cuda(self.device_id[i])
out = torch.cat((out, F.linear(temp_x, weight, bias).cuda(self.device_id[0])), dim=1)
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
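# --- Illustrative usage (editor's sketch, appended for clarity; not part of the
# original file). It exercises the device_id=None code path above, where the head
# is just a fully connected layer. With a device_id list, the weight matrix is
# chunked over out_features across the listed GPUs and the partial logits are
# concatenated along dim=1. The sizes below are arbitrary examples.
def _demo_softmax_head():
    head = Softmax(in_features=512, out_features=1000, device_id=None)
    embeddings = torch.randn(4, 512)   # batch of 4 feature vectors
    logits = head(embeddings)          # equivalent to F.linear(x, W, b)
    assert logits.shape == (4, 1000)
    return logits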
class ArcFace(nn.Module):
r"""Implement of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):
Args:
in_features: size of each input sample
out_features: size of each output sample
device_id: the ID of GPU where the model will be trained by model parallel.
if device_id=None, it will be trained on CPU without model parallel.
s: norm of input feature
m: margin
cos(theta+m)
"""
def __init__(self, in_features, out_features, device_id, s=64.0, m=0.50, easy_margin=False):
super(ArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.device_id = device_id
self.s = s
self.m = m
self.weight = Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
if self.device_id == None:
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
else:
x = input
sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
temp_x = x.cuda(self.device_id[0])
weight = sub_weights[0].cuda(self.device_id[0])
cosine = F.linear(F.normalize(temp_x), F.normalize(weight))
for i in range(1, len(self.device_id)):
temp_x = x.cuda(self.device_id[i])
weight = sub_weights[i].cuda(self.device_id[i])
cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])),
dim=1)
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
# --------------------------- convert label to one-hot ---------------------------
one_hot = torch.zeros(cosine.size())
if self.device_id != None:
one_hot = one_hot.cuda(self.device_id[0])
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
# ------------- out_i = x_i if condition_i else y_i (cf. torch.where) -------------
output = (one_hot * phi) + (
(1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4
output *= self.s
return output
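# --- Illustrative usage (editor's sketch, not part of the original file). ArcFace
# normalizes both features and class weights, replaces the target-class logit
# cos(theta_y) with cos(theta_y + m), and scales everything by s, so the returned
# tensor can be fed directly to a softmax cross-entropy loss. Sizes are arbitrary.
def _demo_arcface_head():
    head = ArcFace(in_features=512, out_features=10, device_id=None, s=64.0, m=0.5)
    embeddings = torch.randn(4, 512)
    labels = torch.randint(0, 10, (4,))
    logits = head(embeddings, labels)       # shape (4, 10), already scaled by s
    loss = F.cross_entropy(logits, labels)  # standard cross-entropy on top
    return loss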
class CosFace(nn.Module):
r"""Implement of CosFace (https://arxiv.org/pdf/1801.09414.pdf):
Args:
in_features: size of each input sample
out_features: size of each output sample
device_id: the ID of GPU where the model will be trained by model parallel.
if device_id=None, it will be trained on CPU without model parallel.
s: norm of input feature
m: margin
cos(theta)-m
"""
def __init__(self, in_features, out_features, device_id, s=64.0, m=0.35):
super(CosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.device_id = device_id
self.s = s
self.m = m
self.weight = Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
if self.device_id == None:
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
else:
x = input
sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
temp_x = x.cuda(self.device_id[0])
weight = sub_weights[0].cuda(self.device_id[0])
cosine = F.linear(F.normalize(temp_x), F.normalize(weight))
for i in range(1, len(self.device_id)):
temp_x = x.cuda(self.device_id[i])
weight = sub_weights[i].cuda(self.device_id[i])
cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])),
dim=1)
phi = cosine - self.m
# --------------------------- convert label to one-hot ---------------------------
one_hot = torch.zeros(cosine.size())
if self.device_id != None:
one_hot = one_hot.cuda(self.device_id[0])
# one_hot = one_hot.cuda() if cosine.is_cuda else one_hot
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
# ------------- out_i = x_i if condition_i else y_i (cf. torch.where) -------------
output = (one_hot * phi) + (
(1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4
output *= self.s
return output
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features = ' + str(self.in_features) \
+ ', out_features = ' + str(self.out_features) \
+ ', s = ' + str(self.s) \
+ ', m = ' + str(self.m) + ')'
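# --- Illustrative comparison (editor's sketch, not part of the original file).
# CosFace differs from ArcFace only in where the margin is applied: it subtracts m
# from the target-class cosine (cos(theta) - m) instead of shifting the angle
# (cos(theta + m)). The tiny check below makes that relationship concrete for a
# single angle theta; the values are arbitrary.
def _demo_margin_comparison(theta=0.8, m=0.35):
    cos_theta = math.cos(theta)
    cosface_logit = cos_theta - m        # additive cosine margin
    arcface_logit = math.cos(theta + m)  # additive angular margin
    # Both push the target logit down, forcing a tighter decision boundary.
    return cosface_logit, arcface_logit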
class SphereFace(nn.Module):
r"""Implement of SphereFace (https://arxiv.org/pdf/1704.08063.pdf):
Args:
in_features: size of each input sample
out_features: size of each output sample
device_id: the ID of GPU where the model will be trained by model parallel.
if device_id=None, it will be trained on CPU without model parallel.
m: margin
cos(m*theta)
"""
def __init__(self, in_features, out_features, device_id, m=4):
super(SphereFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.m = m
self.base = 1000.0
self.gamma = 0.12
self.power = 1
self.LambdaMin = 5.0
self.iter = 0
self.device_id = device_id
self.weight = Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
# Chebyshev polynomials of the first kind: mlambda[m](cos(theta)) == cos(m * theta)
self.mlambda = [
lambda x: x**0, lambda x: x**1, lambda x: 2 * x**2 - 1, lambda x: 4 * x**3 - 3 * x,
lambda x: 8 * x**4 - 8 * x**2 + 1, lambda x: 16 * x**5 - 20 * x**3 + 5 * x
]
def forward(self, input, label):
# lambda = max(lambda_min,base*(1+gamma*iteration)^(-power))
self.iter += 1
self.lamb = max(self.LambdaMin, self.base * (1 + self.gamma * self.iter)**(-1 * self.power))
# --------------------------- cos(theta) & phi(theta) ---------------------------
if self.device_id == None:
cos_theta = F.linear(F.normalize(input), F.normalize(self.weight))
else:
x = input
sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
temp_x = x.cuda(self.device_id[0])
weight = sub_weights[0].cuda(self.device_id[0])
cos_theta = F.linear(F.normalize(temp_x), F.normalize(weight))
for i in range(1, len(self.device_id)):
temp_x = x.cuda(self.device_id[i])
weight = sub_weights[i].cuda(self.device_id[i])
cos_theta = torch.cat(
(cos_theta, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1)
cos_theta = cos_theta.clamp(-1, 1)
cos_m_theta = self.mlambda[self.m](cos_theta)
theta = cos_theta.data.acos()
k = (self.m * theta / 3.14159265).floor()
phi_theta = ((-1.0)**k) * cos_m_theta - 2 * k
NormOfFeature = torch.norm(input, 2, 1)
# --------------------------- convert label to one-hot ---------------------------
one_hot = torch.zeros(cos_theta.size())
if self.device_id != None:
one_hot = one_hot.cuda(self.device_id[0])
one_hot.scatter_(1, label.view(-1, 1), 1)
# --------------------------- Calculate output ---------------------------
output = (one_hot * (phi_theta - cos_theta) / (1 + self.lamb)) + cos_theta
output *= NormOfFeature.view(-1, 1)
return output
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features = ' + str(self.in_features) \
+ ', out_features = ' + str(self.out_features) \
+ ', m = ' + str(self.m) + ')'
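# --- Illustrative note (editor's sketch, not part of the original file). SphereFace
# computes cos(m*theta) via the Chebyshev polynomials in self.mlambda, then unfolds
# it into the monotonically decreasing psi(theta) = (-1)^k * cos(m*theta) - 2k with
# k = floor(m*theta / pi). The helper below reproduces that formula for a single
# angle so the piecewise construction is easier to see; the class does not use it.
def _demo_sphereface_psi(theta, m=4):
    cos_m_theta = math.cos(m * theta)
    k = math.floor(m * theta / math.pi)
    return ((-1.0) ** k) * cos_m_theta - 2 * k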
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class Am_softmax(nn.Module):
r"""Implement of Am_softmax (https://arxiv.org/pdf/1801.05599.pdf):
Args:
in_features: size of each input sample
out_features: size of each output sample
device_id: the ID of GPU where the model will be trained by model parallel.
if device_id=None, it will be trained on CPU without model parallel.
m: margin
s: scale of outputs
"""
def __init__(self, in_features, out_features, device_id, m=0.35, s=30.0):
super(Am_softmax, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.m = m
self.s = s
self.device_id = device_id
self.kernel = Parameter(torch.Tensor(in_features, out_features))
self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5) # initialize kernel
def forward(self, embbedings, label):
if self.device_id == None:
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
else:
x = embbedings
sub_kernels = torch.chunk(self.kernel, len(self.device_id), dim=1)
temp_x = x.cuda(self.device_id[0])
kernel_norm = l2_norm(sub_kernels[0], axis=0).cuda(self.device_id[0])
cos_theta = torch.mm(temp_x, kernel_norm)
for i in range(1, len(self.device_id)):
temp_x = x.cuda(self.device_id[i])
kernel_norm = l2_norm(sub_kernels[i], axis=0).cuda(self.device_id[i])
cos_theta = torch.cat((cos_theta, torch.mm(temp_x, kernel_norm).cuda(self.device_id[0])), dim=1)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
phi = cos_theta - self.m
label = label.view(-1, 1) # size=(B,1)
index = cos_theta.data * 0.0 # size=(B,Classnum)
index.scatter_(1, label.data.view(-1, 1), 1)
index = index.byte()
output = cos_theta * 1.0
output[index] = phi[index] # only change the correct predicted output
output *= self.s # scale up in order to make softmax work, first introduced in normface
return output
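# --- Illustrative training-step sketch (editor's addition, not part of the original
# file). The margin-based heads above (ArcFace, CosFace, SphereFace, Am_softmax)
# share an (embeddings, labels) -> logits interface intended for a softmax
# cross-entropy loss. "backbone" below is a placeholder for any embedding network
# (e.g. a ResNet trunk); it is not defined in this module.
def _demo_training_step(backbone, head, images, labels, optimizer):
    embeddings = backbone(images)      # (B, in_features) feature vectors
    logits = head(embeddings, labels)  # margin-adjusted, scaled logits
    loss = F.cross_entropy(logits, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()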
| 42.768025 | 120 | 0.563659 |
cfd3e2f5c997f180306859e28c45f385b4f972cb | 42 | py | Python | tests/test_croncierge/debug_cmd.py | mburdeev/croncierge | b9eb086bdf4f286640e0bdc263f03851f43a13fc | ["Apache-2.0"] | null | null | null | tests/test_croncierge/debug_cmd.py | mburdeev/croncierge | b9eb086bdf4f286640e0bdc263f03851f43a13fc | ["Apache-2.0"] | null | null | null | tests/test_croncierge/debug_cmd.py | mburdeev/croncierge | b9eb086bdf4f286640e0bdc263f03851f43a13fc | ["Apache-2.0"] | null | null | null |
for i in range(10):
    print(f"Line {i}")
| 21 | 22 | 0.571429 |
2738c19c616e3418dddee186646195cffab4bcbe | 5,515 | py | Python | backend/tracker/api/types/project.py | dmitriyvek/Tracker | b2903d0e980c8480e9c9cbecbfa3987997c7f04e | ["MIT"] | null | null | null | backend/tracker/api/types/project.py | dmitriyvek/Tracker | b2903d0e980c8480e9c9cbecbfa3987997c7f04e | ["MIT"] | null | null | null | backend/tracker/api/types/project.py | dmitriyvek/Tracker | b2903d0e980c8480e9c9cbecbfa3987997c7f04e | ["MIT"] | null | null | null |
import graphene
from graphene.types import ResolveInfo
from tracker.api.connections import (
CustomPageInfo, create_connection_from_record_list,
validate_connection_params
)
from tracker.api.dataloaders import get_generic_loader
from tracker.api.scalars.projects import Description, Title
from tracker.api.schemas.projects import TitleDuplicationCheckSchema
from tracker.api.services import validate_input
from tracker.api.services.projects import (
check_title_duplication,
get_project_node,
get_total_count_of_user_projects,
)
from tracker.api.services.roles import (
ROLES_REQUIRED_FIELDS
)
from tracker.api.services.users import USERS_REQUIRED_FIELDS
from tracker.api.types.role import RoleType, RoleConnection
from tracker.api.wrappers import login_required
from tracker.db.schema import roles_table, users_table
class ProjectType(graphene.ObjectType):
title = Title(
required=True,
description='A title of a project',
)
description = Description(
required=False,
description='A description of a project',
)
created_at = graphene.DateTime(
required=True,
description='Project creation timestamp',
)
role_list = graphene.relay.ConnectionField(
RoleConnection,
required=True,
description='List of all roles in given project'
)
my_role = graphene.Field(
RoleType,
required=True,
description='Role of the current user in given project'
)
created_by = graphene.Field(
'tracker.api.types.user.UserType',
required=True,
description='User who created this project'
)
class Meta:
interfaces = (graphene.relay.Node, )
@classmethod
@login_required
async def get_node(cls, info: ResolveInfo, project_id):
project_id = int(project_id)
user_id = info.context['request']['user_id']
db = info.context['request'].app['db']
# may be used by different resolvers
info.context['request']['project_id'] = project_id
record = await get_project_node(db, info, project_id, user_id)
record = cls(**record)
return record
@staticmethod
async def resolve_created_by(parent, info: ResolveInfo):
if not info.context.get('created_by_loader'):
db = info.context['request'].app['db']
info.context['created_by_loader'] = get_generic_loader(
db=db,
table=users_table,
attr='id',
connection_params=None,
nested_connection=False,
required_fields=USERS_REQUIRED_FIELDS,
many=False,
)()
# parent is ProjectType in node; parent is dict in connection (list)
parent_id = parent.created_by if isinstance(
parent, ProjectType) else parent['created_by']
record = await info.context['created_by_loader'].load(parent_id)
return record
@staticmethod
async def resolve_role_list(
parent, info: ResolveInfo, **connection_params
):
# parent is ProjectType in node; parent is dict in connection (list)
is_list = not isinstance(parent, ProjectType)
if not info.context.get('role_list_loader'):
db = info.context['request'].app['db']
max_fetch_number = info.context['request'].app.\
get('config', {}).\
get('max_fetch_number')
connection_params = validate_connection_params(
connection_params,
RoleType,
max_fetch_number,
nested_connection=is_list
)
info.context['role_list_loader'] = get_generic_loader(
db=db,
table=roles_table,
attr='project_id',
connection_params=connection_params,
nested_connection=is_list,
required_fields=ROLES_REQUIRED_FIELDS,
many=True
)()
parent_id = parent['id'] if is_list else parent.id
record_list = await info.context['role_list_loader'].load(parent_id)
return create_connection_from_record_list(
record_list,
connection_params,
RoleConnection,
RoleType,
CustomPageInfo
)
class ProjectConnection(graphene.relay.Connection):
total_count = graphene.Int(
required=True,
description='Total number of user\'s projects'
)
class Meta:
node = ProjectType
@staticmethod
def resolve_total_count(parent, info: ResolveInfo):
db = info.context['request'].app['db']
user_id = info.context['request']['user_id']
total_count = get_total_count_of_user_projects(db, user_id)
return total_count
class ProjectDuplicationChecksType(graphene.ObjectType):
title = graphene.Boolean(
required=True,
description='Does user already have a project with given title',
title=Title(required=True)
)
@staticmethod
@login_required
async def resolve_title(parent, info: ResolveInfo, title):
db = info.context['request'].app['db']
user_id = info.context['request']['user_id']
data = {'title': title}
validate_input(data, TitleDuplicationCheckSchema)
is_existed = await check_title_duplication(
db, user_id=user_id, title=title
)
return is_existed
| 31.514286 | 76 | 0.642792 |
4482dcf0e29821d0a0138aef4bd0667f0d4a35d7 | 4,691 | py | Python | ros/src/twist_controller/dbw_node.py | MasanaoMatsuda/CarND-CapstoneProject | d6e578514ecdd4c8a64b44ce012dc7674594a9ed | ["MIT"] | null | null | null | ros/src/twist_controller/dbw_node.py | MasanaoMatsuda/CarND-CapstoneProject | d6e578514ecdd4c8a64b44ce012dc7674594a9ed | ["MIT"] | 10 | 2019-12-16T22:21:41.000Z | 2022-03-12T00:07:35.000Z | ros/src/twist_controller/dbw_node.py | MasanaoMatsuda/CarND-Capstone | d6e578514ecdd4c8a64b44ce012dc7674594a9ed | ["MIT"] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. In the simulator it's enabled all the time, but in the real car that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle-specific values (like vehicle_mass,
wheel_base, etc.) should not be altered in these files.
We have also provided some reference implementations for the PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish them on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity=fuel_capacity,
brake_deadband=brake_deadband,
decel_limit=decel_limit,
accel_limit=accel_limit,
wheel_radius=wheel_radius,
wheel_base=wheel_base,
steer_ratio=steer_ratio,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
rospy.Subscriber("/vehicle/dbw_enabled", Bool, self.dbw_enabled_cb)
rospy.Subscriber("/twist_cmd", TwistStamped, self.twist_cb)
rospy.Subscriber("/current_velocity", TwistStamped, self.velocity_cb)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
if None not in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel, self.dbw_enabled, self.linear_vel, self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
self.dbw_enabled = msg
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
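# --- Editor's sketch (not part of the original file). The module docstring above
# warns that the PID can accumulate error while a human is driving (dbw disabled).
# The `Controller` class imported from twist_controller is not shown in this dump,
# so the minimal, hypothetical PID below only illustrates the usual remedy: reset
# the integrator whenever drive-by-wire is disabled.
class SimplePID(object):
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.integral = 0.0
        self.last_error = 0.0

    def reset(self):
        # Called when dbw_enabled goes False so stale error does not carry over.
        self.integral = 0.0
        self.last_error = 0.0

    def step(self, error, dt):
        self.integral += error * dt
        derivative = (error - self.last_error) / dt if dt > 0 else 0.0
        self.last_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative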
| 39.420168 | 153 | 0.679386 |
03b7b0b7f146715aa2917f8e3606fb5884c72da0 | 3,415 | py | Python | evalai/utils/requests.py | cryptobali/evalai-cli | a658e23a6c4c65ec10b017087de93d09bab08ab4 | ["BSD-3-Clause"] | 1 | 2021-06-14T12:07:20.000Z | 2021-06-14T12:07:20.000Z | evalai/utils/requests.py | cryptobali/evalai-cli | a658e23a6c4c65ec10b017087de93d09bab08ab4 | ["BSD-3-Clause"] | 729 | 2020-01-21T20:33:00.000Z | 2021-08-02T23:22:14.000Z | evalai/utils/requests.py | cryptobali/evalai-cli | a658e23a6c4c65ec10b017087de93d09bab08ab4 | ["BSD-3-Clause"] | null | null | null |
import json
import requests
import sys
from click import echo, style
from evalai.utils.config import EVALAI_ERROR_CODES
from evalai.utils.common import validate_token
from .auth import get_request_header, get_host_url
def make_request(path, method, files=None, data=None):
url = "{}{}".format(get_host_url(), path)
headers = get_request_header()
if method == "GET":
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
if response.status_code in EVALAI_ERROR_CODES:
validate_token(response.json())
echo(
style(
"\nError: {}\n".format(response.json().get("error")),
fg="red",
bold=True,
)
)
else:
echo(err)
sys.exit(1)
except requests.exceptions.RequestException:
echo(
style(
"\nCould not establish a connection to EvalAI."
" Please check the Host URL.\n",
bold=True,
fg="red",
)
)
sys.exit(1)
return response.json()
elif method == "POST":
if files:
files = {"input_file": open(files, "rb")}
else:
files = None
data = {"status": "submitting"}
try:
response = requests.post(
url, headers=headers, files=files, data=data
)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
if response.status_code in EVALAI_ERROR_CODES:
validate_token(response.json())
echo(
style(
"\nError: {}\n"
"\nUse `evalai challenges` to fetch the active challenges.\n"
"\nUse `evalai challenge CHALLENGE phases` to fetch the "
"active phases.\n".format(response.json()["error"]),
fg="red",
bold=True,
)
)
else:
echo(err)
sys.exit(1)
except requests.exceptions.RequestException:
echo(
style(
"\nCould not establish a connection to EvalAI."
" Please check the Host URL.\n",
bold=True,
fg="red",
)
)
sys.exit(1)
response = json.loads(response.text)
echo(
style(
"\nYour docker file is successfully submitted.\n",
fg="green",
bold=True,
)
)
echo(
style(
"You can use `evalai submission {}` to view this submission's status.\n".format(
response.get("id")
),
bold=True,
)
)
return response
elif method == "PUT":
# TODO: Add support for PUT request
pass
elif method == "PATCH":
# TODO: Add support for PATCH request
pass
elif method == "DELETE":
# TODO: Add support for DELETE request
pass
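# --- Illustrative usage (editor's sketch, not part of the original module). A GET
# call returns the decoded JSON body; a POST call optionally uploads a file under
# the "input_file" key. The path below is a hypothetical endpoint used purely for
# illustration; real callers pass URL paths defined elsewhere in evalai-cli.
if __name__ == "__main__":
    response_body = make_request("/api/some/hypothetical/endpoint/", "GET")
    echo(style(json.dumps(response_body, indent=2), fg="green"))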
| 31.62037 | 96 | 0.464714 |
b1f66448d6648c974b8b65ea4de92298db9baf20 | 444 | py | Python | examples/load_from_files_2.py | ikonst/jschon | 4aa5c2ffce1dca831342aab232bceff9c542c137 | ["MIT"] | 53 | 2021-03-31T15:32:14.000Z | 2022-03-19T03:32:00.000Z | examples/load_from_files_2.py | ikonst/jschon | 4aa5c2ffce1dca831342aab232bceff9c542c137 | ["MIT"] | 27 | 2021-03-31T15:43:38.000Z | 2022-03-25T08:24:49.000Z | examples/load_from_files_2.py | ikonst/jschon | 4aa5c2ffce1dca831342aab232bceff9c542c137 | ["MIT"] | 5 | 2021-03-31T18:42:10.000Z | 2022-02-02T13:52:11.000Z |
import pathlib
from jschon import create_catalog, JSON, JSONSchema, URI, LocalSource
data_dir = pathlib.Path(__file__).parent / 'data'
catalog = create_catalog('2020-12')
catalog.add_uri_source(URI('https://example.com/'), LocalSource(data_dir, suffix='.json'))
org_schema = JSONSchema.loadf(data_dir / 'org-schema.json')
org_data = JSON.loadf(data_dir / 'org-data.json')
result = org_schema.evaluate(org_data)
print(result.output('flag'))
| 31.714286 | 90 | 0.761261 |
c4675f79c58f5e105a6ec8006f47a25e5ee48228 | 46,888 | py | Python | hphp/hack/test/integration/common_tests.py | jmurret/hhvm | f005fa3ca2793291cf59e217db3e9ce074d22f71 | ["PHP-3.01", "Zend-2.0"] | 1 | 2020-01-17T02:24:38.000Z | 2020-01-17T02:24:38.000Z | hphp/hack/test/integration/common_tests.py | jmurret/hhvm | f005fa3ca2793291cf59e217db3e9ce074d22f71 | ["PHP-3.01", "Zend-2.0"] | null | null | null | hphp/hack/test/integration/common_tests.py | jmurret/hhvm | f005fa3ca2793291cf59e217db3e9ce074d22f71 | ["PHP-3.01", "Zend-2.0"] | null | null | null |
# pyre-strict
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import unittest
from typing import ClassVar, List, Mapping, Optional, Tuple
from hh_paths import hackfmt, hh_client, hh_merge_deps, hh_server
from test_case import TestCase, TestDriver
from utils import Json, JsonObject
class DebugSubscription(object):
"""
Wraps `hh_client debug`.
"""
# pyre-fixme[24]: Generic type `subprocess.Popen` expects 1 type parameter.
def __init__(self, proc: subprocess.Popen) -> None:
self.proc = proc
hello = self.read_msg()
assert hello["data"] == "hello"
def read_msg(self) -> Json:
line = self.proc.stdout.readline()
return json.loads(line)
def get_incremental_logs(self) -> JsonObject:
msgs = {}
while True:
msg = self.read_msg()
if msg["type"] == "info" and msg["data"] == "incremental_done":
break
msgs[msg["name"]] = msg
return msgs
class CommonTestDriver(TestDriver):
# This needs to be overridden in child classes. The files in this
# directory will be used to set up the initial environment for each
# test.
template_repo: ClassVar[str]
repo_dir: ClassVar[str]
test_env: ClassVar[Mapping[str, str]]
base_tmp_dir: ClassVar[str]
hh_tmp_dir: ClassVar[str]
bin_dir: ClassVar[str]
@classmethod
def setUpClass(cls, template_repo: str) -> None:
cls.template_repo = template_repo
cls.maxDiff = 2000
cls.base_tmp_dir = tempfile.mkdtemp()
# we don't create repo_dir using mkdtemp() because we want to create
# it with copytree(). copytree() will fail if the directory already
# exists.
cls.repo_dir = os.path.join(cls.base_tmp_dir, "repo")
# Where the hhi files, socket, etc get extracted
cls.hh_tmp_dir = tempfile.mkdtemp()
cls.bin_dir = tempfile.mkdtemp()
hh_server_dir = os.path.dirname(hh_server)
hh_merge_deps_dir = os.path.dirname(hh_merge_deps)
print("hh_server_dir " + hh_server_dir)
print("hh_merge_deps_dir " + hh_merge_deps_dir)
cls.test_env = dict(
os.environ,
**{
"HH_TEST_MODE": "1",
"HH_TMPDIR": cls.hh_tmp_dir,
"PATH": (
"%s:%s:%s:/bin:/usr/bin:/usr/local/bin"
% (hh_server_dir, hh_merge_deps_dir, cls.bin_dir)
),
"HH_HOME": os.path.dirname(hh_client),
"OCAMLRUNPARAM": "b",
"HH_LOCALCONF_PATH": cls.repo_dir,
},
)
@classmethod
def tearDownClass(cls) -> None:
shutil.rmtree(cls.base_tmp_dir)
shutil.rmtree(cls.bin_dir)
shutil.rmtree(cls.hh_tmp_dir)
def write_load_config(self, use_saved_state: bool = False) -> None:
"""
Writes out a script that will print the list of changed files,
and adds the path to that script to .hhconfig
"""
raise NotImplementedError()
def wait_until_server_ready(self) -> None:
"""
We don't want to accidentally connect to an old hh_server, so we wait 2
seconds for the monitor to start up the new server first.
"""
time.sleep(2)
self.run_check()
def start_hh_server(
self,
changed_files: Optional[List[str]] = None,
saved_state_path: Optional[str] = None,
args: Optional[List[str]] = None,
) -> None:
""" Start an hh_server. changed_files is ignored here (as it
has no meaning) and is only exposed in this API for the derived
classes.
"""
if changed_files is None:
changed_files = []
if args is None:
args = []
cmd = [hh_server, "--daemon", "--max-procs", "2", self.repo_dir] + args
self.proc_call(cmd)
self.wait_until_server_ready()
def stop_hh_server(self, retries: int = 3) -> None:
(_, _, exit_code) = self.proc_call([hh_client, "stop", self.repo_dir])
if exit_code == 0:
return
elif retries > 0 and exit_code != 0:
self.stop_hh_server(retries=retries - 1)
else:
self.assertEqual(exit_code, 0, msg="Stopping hh_server failed")
def get_server_logs(self) -> str:
time.sleep(2) # wait for logs to be written
log_file = self.proc_call([hh_client, "--logname", self.repo_dir])[0].strip()
with open(log_file) as f:
return f.read()
def get_monitor_logs(self) -> str:
time.sleep(2) # wait for logs to be written
log_file = self.proc_call([hh_client, "--monitor-logname", self.repo_dir])[
0
].strip()
with open(log_file) as f:
return f.read()
def setUp(self) -> None:
shutil.copytree(self.template_repo, self.repo_dir)
def tearDownWithRetries(self, retries: int = 3) -> None:
self.stop_hh_server(retries=retries)
shutil.rmtree(self.repo_dir)
def tearDown(self) -> None:
self.tearDownWithRetries()
@classmethod
# pyre-fixme[24]: Generic type `subprocess.Popen` expects 1 type parameter.
def proc_create(cls, args: List[str], env: Mapping[str, str]) -> subprocess.Popen:
return subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=dict(cls.test_env, **env),
universal_newlines=True,
)
@classmethod
def proc_call(
cls,
args: List[str],
env: Optional[Mapping[str, str]] = None,
stdin: Optional[str] = None,
) -> Tuple[str, str, int]:
"""
Invoke a subprocess, return stdout, send stderr to our stderr (for
debugging)
"""
env = {} if env is None else env
print(" ".join(args), file=sys.stderr)
proc = cls.proc_create(args, env)
(stdout_data, stderr_data) = proc.communicate(stdin)
sys.stderr.write(stderr_data)
sys.stderr.flush()
retcode = proc.wait()
return (stdout_data, stderr_data, retcode)
@classmethod
def wait_pid_with_timeout(cls, pid: int, timeout: int) -> None:
"""
Like os.waitpid but with a timeout in seconds.
"""
waited_time = 0
while True:
pid_expected, _ = os.waitpid(pid, os.WNOHANG)
if pid_expected == pid:
break
elif waited_time >= timeout:
raise subprocess.TimeoutExpired(cmd="waitpid %d" % pid, timeout=timeout)
else:
time.sleep(1)
waited_time += 1
def run_check(
self, stdin: Optional[str] = None, options: Optional[List[str]] = None
) -> Tuple[str, str, int]:
options = [] if options is None else options
root = self.repo_dir + os.path.sep
return self.proc_call(
[
hh_client,
"check",
"--retries",
"20",
"--error-format",
"raw",
self.repo_dir,
]
+ list(map(lambda x: x.format(root=root), options)),
stdin=stdin,
)
# Check to see if you can run hackfmt
def run_hackfmt_check(self) -> bool:
try:
#
(stdout_data, stderr_data, retcode) = self.proc_call(["hackfmt", "-help"])
return retcode == 0
# If the file isn't found you will get this
except FileNotFoundError:
return False
def run_hackfmt(
self,
stdin: Optional[str] = None,
options: Optional[List[str]] = None,
expected_output: Optional[str] = None,
) -> bool:
options = [] if options is None else options
(output, err, retcode) = self.proc_call([hackfmt] + options, stdin=stdin)
if retcode != 0:
print("check returned non-zero code: " + str(retcode), file=sys.stderr)
if expected_output is not None:
self.assertEqual(expected_output, output)
return True
# Runs `hh_client check` asserting the stdout is equal the expected.
# Returns stderr.
# Note: assert_loaded_saved_state is ignored here and only used
# in some derived classes.
def check_cmd(
self,
expected_output: Optional[List[str]],
stdin: Optional[str] = None,
options: Optional[List[str]] = None,
assert_loaded_saved_state: bool = False,
) -> str:
(output, err, retcode) = self.run_check(stdin, options)
root = self.repo_dir + os.path.sep
if retcode != 0:
print("check returned non-zero code: " + str(retcode), file=sys.stderr)
if expected_output is not None:
self.assertCountEqual(
map(lambda x: x.format(root=root), expected_output), output.splitlines()
)
return err
def check_cmd_and_json_cmd(
self,
expected_output: List[str],
expected_json: List[str],
stdin: Optional[str] = None,
options: Optional[List[str]] = None,
) -> None:
# we run the --json version first because --json --refactor doesn't
# change any files, but plain --refactor does (i.e. the latter isn't
# idempotent)
if options is None:
options = []
self.check_cmd(expected_json, stdin, options + ["--json"])
self.check_cmd(expected_output, stdin, options)
def subscribe_debug(self) -> DebugSubscription:
proc = self.proc_create([hh_client, "debug", self.repo_dir], env={})
return DebugSubscription(proc)
def start_hh_loop_forever_assert_timeout(self) -> None:
# create a file with 10 dependencies. Only "big" jobs that use
# workers can be interrupted at the moment.
with open(os.path.join(self.repo_dir, "__hh_loop_forever_foo.php"), "w") as f:
f.write(
"""<?hh //strict
function __hh_loop_forever_foo(): int {
return 4;
}"""
)
for i in range(1, 10):
with open(
os.path.join(self.repo_dir, "__hh_loop_forever_bar%d.php" % i), "w"
) as f:
f.write(
"""<?hh //strict
function __hh_loop_forever_bar%d(): int {
return __hh_loop_forever_foo();
}"""
% i
)
self.check_cmd(["No errors!"])
# trigger rechecking of all 11 files, and make one of them loop
# until cancelled
with open(os.path.join(self.repo_dir, "__hh_loop_forever_foo.php"), "w") as f:
f.write(
"""<?hh //strict
function __hh_loop_forever_foo(): string {
hh_loop_forever();
}"""
)
# this should timeout due to infinite loop
try:
# empty output means no results due to timeout
self.check_cmd([], options=["--retries", "1"])
except AssertionError:
# one of the test drivers doesn't like timeouts
pass
def stop_hh_loop_forever(self) -> None:
# subsequent change should interrupt the "loop forever" part
with open(os.path.join(self.repo_dir, "__hh_loop_forever_foo.php"), "w") as f:
f.write(
"""<?hh //strict
function __hh_loop_forever_foo(): int {
return 4;
}"""
)
self.check_cmd(["No errors!"])
# The most basic of tests.
# Exercises server responsiveness, and updating errors after changing files
class BarebonesTests(TestCase[CommonTestDriver]):
# hh should work with 0 retries.
def test_responsiveness(self) -> None:
self.test_driver.start_hh_server()
self.test_driver.check_cmd(["No errors!"])
self.test_driver.check_cmd(["No errors!"], options=["--retries", "0"])
def test_new_file(self) -> None:
"""
Add a new file that contains an error.
"""
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh
function k(): int {
return 'a';
}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_4.php"])
self.test_driver.check_cmd(
[
"{root}foo_4.php:4:24,26: Invalid return type (Typing[4110])",
" {root}foo_4.php:3:27,29: Expected int",
" {root}foo_4.php:4:24,26: But got string",
]
)
def test_new_naming_error(self) -> None:
"""
Add a new file which contains a naming collisions with an old file
"""
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh //partial
class FOO {}
function H () {}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_4.php"])
self.test_driver.check_cmd(
[
"{root}foo_4.php:3:19,21: Could not find FOO (Naming[2006])",
" {root}foo_3.php:7:15,17: Did you mean Foo?",
"{root}foo_4.php:3:19,21: Name already bound: FOO (Naming[2012])",
" {root}foo_3.php:7:15,17: Previous definition Foo differs only in capitalization ",
"{root}foo_4.php:4:22,22: Could not find H (Naming[2006])",
" {root}foo_3.php:3:18,18: Did you mean h?",
"{root}foo_4.php:4:22,22: Name already bound: H (Naming[2012])",
" {root}foo_3.php:3:18,18: Previous definition h differs only in capitalization ",
]
)
# We put this test in Barebones tests so that dependencies on class B
# show an error (i.e. class_3.php) with both the save state driver
# and the classic save state driver
def test_modify_extends_deps(self) -> None:
"""
Introduce a change to a base class that causes an error
in a use case on one of its subclasses.
"""
with open(os.path.join(self.test_driver.repo_dir, "class_1.php"), "w") as f:
f.write(
"""<?hh // strict
class B {
public static function foo () : bool {
return true;
}
}
"""
)
self.test_driver.start_hh_server(changed_files=["class_1.php"])
self.test_driver.check_cmd(
[
"{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
" {root}class_3.php:4:28,30: Expected int",
" {root}class_1.php:4:51,54: But got bool",
]
)
# Common tests, includes the Barebones Tests above
class CommonTests(BarebonesTests):
def test_json_errors(self) -> None:
"""
If you ask for errors in JSON format, you will get them on standard
output. Changing this will break the tools that depend on it (like
editor plugins), and this test is here to remind you about it.
"""
self.test_driver.start_hh_server()
stderr = self.test_driver.check_cmd([], options=["--json"])
last_line = stderr.splitlines()[-1]
output = json.loads(last_line)
self.assertEqual(output["errors"], [])
self.assertEqual(output["passed"], True)
self.assertIn("version", output)
def test_modify_file(self) -> None:
"""
Add an error to a file that previously had none.
"""
with open(os.path.join(self.test_driver.repo_dir, "foo_2.php"), "w") as f:
f.write(
"""<?hh
function g(): int {
return 'a';
}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_2.php"])
self.test_driver.check_cmd(
[
"{root}foo_2.php:4:24,26: Invalid return type (Typing[4110])",
" {root}foo_2.php:3:27,29: Expected int",
" {root}foo_2.php:4:24,26: But got string",
]
)
def test_deleted_file(self) -> None:
"""
Delete a file that still has dangling references before restoring from
a saved state.
"""
os.remove(os.path.join(self.test_driver.repo_dir, "foo_2.php"))
self.test_driver.start_hh_server(changed_files=["foo_2.php"])
self.test_driver.check_cmd(
[
"{root}foo_1.php:4:20,20: Unbound name: g (a global function) (Naming[2049])"
]
)
def test_file_delete_after_load(self) -> None:
"""
Delete a file that still has dangling references after restoring from
a saved state.
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd(["No errors!"])
debug_sub = self.test_driver.subscribe_debug()
os.remove(os.path.join(self.test_driver.repo_dir, "foo_2.php"))
msgs = debug_sub.get_incremental_logs()
self.assertEqual(msgs["to_redecl_phase1"]["files"], ["foo_2.php"])
self.assertEqual(msgs["to_redecl_phase2"]["files"], ["foo_2.php"])
self.assertEqual(
set(msgs["to_recheck"]["files"]), set(["foo_1.php", "foo_2.php"])
)
self.test_driver.check_cmd(
[
"{root}foo_1.php:4:20,20: Unbound name: g (a global function) (Naming[2049])"
]
)
def test_duplicated_file(self) -> None:
self.test_driver.start_hh_server(changed_files=["foo_2.php"])
self.test_driver.check_cmd(["No errors!"])
shutil.copyfile(
os.path.join(self.test_driver.repo_dir, "foo_2.php"),
os.path.join(self.test_driver.repo_dir, "foo_2_dup.php"),
)
self.test_driver.check_cmd(
[
"{root}foo_2_dup.php:3:18,18: Name already bound: g (Naming[2012])",
" {root}foo_2.php:3:18,18: Previous definition is here",
]
)
os.remove(os.path.join(self.test_driver.repo_dir, "foo_2.php"))
self.test_driver.check_cmd(["No errors!"])
def test_moved_file(self) -> None:
"""
Move a file, then create an error that references a definition in it.
Check that the new file name is displayed in the error.
"""
self.test_driver.start_hh_server(
changed_files=["foo_1.php", "foo_2.php", "bar_2.php"]
)
os.rename(
os.path.join(self.test_driver.repo_dir, "foo_2.php"),
os.path.join(self.test_driver.repo_dir, "bar_2.php"),
)
with open(os.path.join(self.test_driver.repo_dir, "foo_1.php"), "w") as f:
f.write(
"""<?hh
function f(): string {
return g();
}
"""
)
self.test_driver.check_cmd(
[
"{root}foo_1.php:4:24,26: Invalid return type (Typing[4110])",
" {root}foo_1.php:3:27,32: Expected string",
" {root}bar_2.php:3:23,25: But got int",
]
)
def test_find_refs(self) -> None:
"""
Test hh_client --find-refs, --find-class-refs
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
['File "{root}foo_3.php", line 11, characters 13-13: h', "1 total results"],
[
'[{{"name":"h","filename":"{root}foo_3.php","line":11,"char_start":13,"char_end":13}}]'
],
options=["--find-refs", "h"],
)
self.test_driver.check_cmd_and_json_cmd(
[
'File "{root}foo_3.php", line 10, characters 17-19: Foo::__construct',
"1 total results",
],
[
'[{{"name":"Foo::__construct","filename":"{root}foo_3.php","line":10,"char_start":17,"char_end":19}}]'
],
options=["--find-refs", "Foo::__construct"],
)
self.test_driver.check_cmd_and_json_cmd(
[
'File "{root}foo_3.php", line 10, characters 17-19: Foo',
"1 total results",
],
[
'[{{"name":"Foo","filename":"{root}foo_3.php","line":10,'
'"char_start":17,"char_end":19}}]'
],
options=["--find-class-refs", "Foo"],
)
def test_ide_find_refs(self) -> None:
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
[
"Foo",
'File "{root}foo_3.php", line 10, characters 17-19:',
"1 total results",
],
[
'[{{"name":"Foo","filename":"{root}foo_3.php",'
'"line":10,"char_start":17,"char_end":19}}]'
],
options=["--ide-find-refs", "1:20"],
stdin="<?hh function test(Foo $foo) { new Foo(); }",
)
def test_ide_highlight_refs(self) -> None:
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
["line 1, characters 20-22", "line 1, characters 36-38", "2 total results"],
[
'[{{"line":1,"char_start":20,"char_end":22}},'
'{{"line":1,"char_start":36,"char_end":38}}]'
],
options=["--ide-highlight-refs", "1:20"],
stdin="<?hh function test(Foo $foo) { new Foo(); }",
)
def test_search(self) -> None:
"""
Test hh_client --search
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
[
'File "{root}foo_3.php", line 9, characters 18-40: some_long_function_name, function'
],
[
'[{{"name":"some_long_function_name","filename":"{root}foo_3.php","desc":"function","line":9,"char_start":18,"char_end":40,"scope":""}}]'
],
options=["--search", "some_lo"],
)
def test_search_case_insensitive1(self) -> None:
"""
Test that global search is not case sensitive
"""
self.maxDiff = None
self.test_driver.start_hh_server()
self.test_driver.check_cmd(
[
'File "{root}foo_4.php", line 4, characters 10-24: '
"aaaaaaaaaaa_fun, function",
'File "{root}foo_4.php", line 3, characters 7-23: '
"Aaaaaaaaaaa_class, class",
],
options=["--search", "Aaaaaaaaaaa"],
)
def test_search_case_insensitive2(self) -> None:
"""
Test that global search is not case sensitive
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd(
[
'File "{root}foo_4.php", line 4, characters 10-24: '
"aaaaaaaaaaa_fun, function",
'File "{root}foo_4.php", line 3, characters 7-23: '
"Aaaaaaaaaaa_class, class",
],
options=["--search", "aaaaaaaaaaa"],
)
def test_auto_complete(self) -> None:
"""
Test hh_client --auto-complete
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
["some_long_function_name (function(): _)"],
[
# test the --json output because the non-json one doesn't contain
# the filename, and we are especially interested in testing file
# paths
# the doubled curly braces are because this string gets passed
# through format()
'[{{"name":"some_long_function_name",'
'"type":"(function(): _)",'
'"pos":{{"filename":"{root}foo_3.php",'
'"line":9,"char_start":18,"char_end":40}},'
'"func_details":{{"min_arity":0,"return_type":"_","params":[]}},'
'"expected_ty":false}}]'
],
options=["--auto-complete"],
stdin="<?hh function f() { some_AUTO332\n",
)
def test_list_files(self) -> None:
"""
Test hh_client --list-files
"""
os.remove(os.path.join(self.test_driver.repo_dir, "foo_2.php"))
self.test_driver.start_hh_server(changed_files=["foo_2.php"])
self.test_driver.check_cmd_and_json_cmd(
["{root}foo_1.php"],
["{root}foo_1.php"], # see comment for identify-function
options=["--list-files"],
)
def test_type_at_pos(self) -> None:
"""
Test hh_client --type-at-pos
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
["string"],
[
'{{"type":"string",'
+ '"pos":{{"filename":"","line":0,"char_start":0,"char_end":0}},'
+ '"full_type":{{"kind":"primitive","name":"string"}}}}'
],
options=["--type-at-pos", "{root}foo_3.php:11:14"],
)
def test_type_at_pos_batch(self) -> None:
"""
Test hh_client --type-at-pos-batch
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd(
[
'{{"position":'
+ '{{"file":"{root}foo_3.php",'
+ '"line":11,'
+ '"character":14}}'
+ ',"type":{{'
+ '"kind":"primitive",'
+ '"name":"string"}}}}'
],
options=["--type-at-pos-batch", "{root}foo_3.php:11:14"],
)
def test_ide_get_definition(self) -> None:
"""
Test hh_client --ide-get-definition
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
[
"name: \\bar, kind: function, span: line 1, characters 42-44,"
" is_declaration: false",
"definition:",
" bar",
" kind: function",
" id: function::bar",
' position: File "", line 1, characters 15-17:',
' span: File "", line 1, character 6 - line 1, character 22:',
" modifiers: ",
" params:",
"",
"",
],
[
'[{{"name":"\\\\bar","result_type":"function",'
'"pos":{{"filename":"","line":1,"char_start":42,"char_end":44}},'
'"definition_pos":{{"filename":"","line":1,"char_start":15,'
'"char_end":17}},"definition_span":{{"filename":"","line_start":1,'
'"char_start":6,"line_end":1,"char_end":22}},'
'"definition_id":"function::bar"}}]'
],
options=["--ide-get-definition", "1:43"],
stdin="<?hh function bar() {} function test() { bar() }",
)
def test_ide_outline(self) -> None:
"""
Test hh_client --ide-outline
"""
self.test_driver.start_hh_server()
"""
This call is here to ensure that the server is running. The outline command
doesn't require it to be, but the integration test suite assumes it and
checks its state after each test.
"""
self.test_driver.check_cmd(["No errors!"])
self.test_driver.check_cmd_and_json_cmd(
[
"bar",
" kind: function",
" id: function::bar",
' position: File "", line 1, characters 15-17:',
' span: File "", line 1, character 6 - line 1, character 22:',
" modifiers: ",
" params:",
"",
],
[
'[{{"kind":"function","name":"bar","id":"function::bar",'
'"position":{{"filename":"",'
'"line":1,"char_start":15,"char_end":17}},"span":'
'{{"filename":"","line_start":1,"char_start":6,"line_end":1,'
'"char_end":22}},"modifiers":[],"params":[]}}]'
],
options=["--ide-outline"],
stdin="<?hh function bar() {}",
)
def test_ide_get_definition_multi_file(self) -> None:
"""
Test hh_client --ide-get-definition when definition we look for is
in file different from input file
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd_and_json_cmd(
[
"name: \\ClassToBeIdentified::methodToBeIdentified, kind: method,"
" span: line 1, characters 45-64, is_declaration: false",
"definition:",
" methodToBeIdentified",
" kind: method",
" id: method::ClassToBeIdentified::methodToBeIdentified",
' position: File "{root}foo_5.php", line 4, characters 26-45:',
' span: File "{root}foo_5.php", line 4, character 3 - line 4,'
" character 50:",
" modifiers: public static ",
" params:",
"",
"",
],
[
'[{{"name":"\\\\ClassToBeIdentified::methodToBeIdentified",'
'"result_type":"method","pos":{{"filename":"","line":1,'
'"char_start":45,"char_end":64}},"definition_pos":'
'{{"filename":"{root}foo_5.php","line":4,"char_start":26,'
'"char_end":45}},"definition_span":{{"filename":"{root}foo_5.php",'
'"line_start":4,"char_start":3,"line_end":4,"char_end":50}},'
'"definition_id":'
'"method::ClassToBeIdentified::methodToBeIdentified"}}]'
],
options=["--ide-get-definition", "1:50"],
stdin="<?hh function test() { "
"ClassToBeIdentified::methodToBeIdentified () }",
)
def test_format(self) -> None:
"""
Test --format
"""
if not self.test_driver.run_hackfmt_check():
raise unittest.SkipTest("Hackfmt can't be found. Skipping.")
self.test_driver.run_hackfmt(
expected_output="function test1(int $x) {\n"
+ " $x = $x * x + 3;\n"
+ " return f($x);\n"
+ "}\n",
options=["--indent-width", "2", "--range", "7", "63"],
stdin="""<?hh
function test1(int $x) { $x = $x*x + 3; return f($x); }
function test2(int $x) { $x = $x*x + 5; return f($x); }
""",
)
def test_abnormal_typechecker_exit_message(self) -> None:
"""
Tests that the monitor outputs a useful message when its typechecker
exits abnormally.
"""
self.test_driver.start_hh_server()
monitor_logs = self.test_driver.get_monitor_logs()
m = re.search(
"Just started typechecker server with pid: ([0-9]+)", monitor_logs
)
self.assertIsNotNone(m)
assert m is not None, "for mypy"
pid = m.group(1)
self.assertIsNotNone(pid)
os.kill(int(pid), signal.SIGTERM)
# For some reason, waitpid in the monitor after the kill signal
# sent above doesn't preserve ordering - maybe because they're
# in separate processes? Give it some time.
time.sleep(1)
client_error = self.test_driver.check_cmd(
expected_output=None, assert_loaded_saved_state=False
)
self.assertIn("Last server killed by signal", client_error)
def test_duplicate_parent(self) -> None:
"""
This checks that we handle duplicate parent classes, i.e. when Bar
extends Foo and there are two declarations of Foo. We want to make sure
that when the duplicate gets removed, we recover correctly by
redeclaring Bar with the remaining parent class.
"""
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh //partial
class Foo { // also declared in foo_3.php in setUpClass
public static $x;
}
"""
)
with open(os.path.join(self.test_driver.repo_dir, "foo_5.php"), "w") as f:
f.write(
"""<?hh //partial
class Bar extends Foo {}
function main(Bar $a) {
return $a::$y;
}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_4.php", "foo_5.php"])
self.test_driver.check_cmd(
[
"{root}foo_4.php:3:19,21: Name already bound: Foo (Naming[2012])",
" {root}foo_3.php:7:15,17: Previous definition is here",
"{root}foo_5.php:6:28,29: No class variable '$y' in Bar (did you mean '$x'?) (Typing[4090])",
" {root}foo_5.php:3:19,21: Declaration of Bar is here",
]
)
os.remove(os.path.join(self.test_driver.repo_dir, "foo_4.php"))
self.test_driver.check_cmd(
[
"{root}foo_5.php:6:28,29: No class variable '$y' in Bar (Typing[4090])",
" {root}foo_5.php:3:19,21: Declaration of Bar is here",
]
)
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh //partial
class Foo {
public static $y;
}
"""
)
os.remove(os.path.join(self.test_driver.repo_dir, "foo_3.php"))
self.test_driver.check_cmd(["No errors!"])
def test_refactor_methods(self) -> None:
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh //partial
class Bar extends Foo {
public function f() {}
public function g() {}
}
class Baz extends Bar {
public function g() {
$this->f();
}
}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_4.php"])
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 1 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":84,"char_end":85,"line":4,"col_start":33,'
'"col_end":33,"patch_type":"replace","replacement":"wat"}},'
'{{"char_start":246,"char_end":247,"line":10,"col_start":28,'
'"col_end":28,"patch_type":"replace","replacement":"wat"}}]}}]'
],
options=["--refactor", "Method", "Bar::f", "Bar::wat"],
)
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 1 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":125,"char_end":126,"line":5,"col_start":33,'
'"col_end":33,"patch_type":"replace",'
'"replacement":"overrideMe"}},{{"char_start":215,'
'"char_end":216,"line":9,"col_start":33,"col_end":33,'
'"patch_type":"replace","replacement":"overrideMe"}}]}}]'
],
options=["--refactor", "Method", "Bar::g", "Bar::overrideMe"],
)
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 2 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":46,"char_end":49,"line":3,"col_start":31,'
'"col_end":33,"patch_type":"replace","replacement":"Qux"}}]}},'
'{{"filename":"{root}foo_3.php","patches":[{{'
'"char_start":96,"char_end":99,"line":7,"col_start":15,'
'"col_end":17,"patch_type":"replace","replacement":"Qux"}},'
'{{"char_start":165,"char_end":168,"line":10,"col_start":17,'
'"col_end":19,"patch_type":"replace","replacement":"Qux"}}]'
"}}]"
],
options=["--refactor", "Class", "Foo", "Qux"],
)
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php")) as f:
out = f.read()
self.assertEqual(
out,
"""<?hh //partial
class Bar extends Qux {
public function wat() {}
public function overrideMe() {}
}
class Baz extends Bar {
public function overrideMe() {
$this->wat();
}
}
""",
)
with open(os.path.join(self.test_driver.repo_dir, "foo_3.php")) as f:
out = f.read()
self.assertEqual(
out,
"""<?hh //partial
function h(): string {
return "a";
}
class Qux {}
function some_long_function_name() {
new Qux();
h();
}
""",
)
def test_refactor_functions(self) -> None:
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh //partial
function wow() {
wat();
return f();
}
function wat() {}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_4.php"])
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 1 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":132,"char_end":135,"line":8,"col_start":22,'
'"col_end":24,"patch_type":"replace","replacement":"woah"}},'
'{{"char_start":61,"char_end":64,"line":4,"col_start":17,'
'"col_end":19,"patch_type":"replace","replacement":"woah"}}]'
"}}]"
],
options=["--refactor", "Function", "wat", "woah"],
)
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 2 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":92,"char_end":93,"line":5,"col_start":24,'
'"col_end":24,"patch_type":"replace","replacement":"fff"}}]}},'
'{{"filename":"{root}foo_1.php","patches":[{{'
'"char_start":33,"char_end":34,"line":3,"col_start":18,'
'"col_end":18,"patch_type":"replace","replacement":"fff"}}]'
"}}]"
],
options=["--refactor", "Function", "f", "fff"],
)
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php")) as f:
out = f.read()
self.assertEqual(
out,
"""<?hh //partial
function wow() {
woah();
return fff();
}
function woah() {}
""",
)
with open(os.path.join(self.test_driver.repo_dir, "foo_1.php")) as f:
out = f.read()
self.assertEqual(
out,
"""<?hh //partial
function fff() {
return g() + 1;
}
""",
)
def test_refactor_typedefs(self) -> None:
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php"), "w") as f:
f.write(
"""<?hh //partial
newtype NewType = int;
type Type = int;
class MyClass {
public function myFunc(Type $x): NewType {
return $x;
}
}
"""
)
self.test_driver.start_hh_server(changed_files=["foo_4.php"])
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 1 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":36,"char_end":43,"line":3,"col_start":21,'
'"col_end":27,"patch_type":"replace","replacement":"NewTypeX"}},'
'{{"char_start":158,"char_end":165,"line":7,"col_start":50,'
'"col_end":56,"patch_type":"replace","replacement":"NewTypeX"}}]'
"}}]"
],
options=["--refactor", "Class", "NewType", "NewTypeX"],
)
self.test_driver.check_cmd_and_json_cmd(
["Rewrote 1 files."],
[
'[{{"filename":"{root}foo_4.php","patches":[{{'
'"char_start":69,"char_end":73,"line":4,"col_start":18,'
'"col_end":21,"patch_type":"replace","replacement":"TypeX"}},'
'{{"char_start":149,"char_end":153,"line":7,"col_start":40,'
'"col_end":43,"patch_type":"replace","replacement":"TypeX"}}]'
"}}]"
],
options=["--refactor", "Class", "Type", "TypeX"],
)
with open(os.path.join(self.test_driver.repo_dir, "foo_4.php")) as f:
out = f.read()
self.assertEqual(
out,
"""<?hh //partial
newtype NewTypeX = int;
type TypeX = int;
class MyClass {
public function myFunc(TypeX $x): NewTypeX {
return $x;
}
}
""",
)
def test_auto_namespace_alias_addition(self) -> None:
"""
        Add a namespace alias and check that the file still typechecks with no errors
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd(["No errors!"])
with open(os.path.join(self.test_driver.repo_dir, "auto_ns_2.php"), "w") as f:
f.write(
"""<?hh //partial
function haha() {
Herp\\f();
return 1;
}
"""
)
self.test_driver.check_cmd(["No errors!"])
def test_interrupt(self) -> None:
# filesystem interruptions are only triggered by Watchman
with open(os.path.join(self.test_driver.repo_dir, ".watchmanconfig"), "w") as f:
f.write("{}")
with open(os.path.join(self.test_driver.repo_dir, "hh.conf"), "a") as f:
f.write(
"use_watchman = true\n"
+ "interrupt_on_watchman = true\n"
+ "interrupt_on_client = true\n"
+ "watchman_subscribe_v2 = true\n"
)
self.test_driver.start_hh_server()
self.test_driver.start_hh_loop_forever_assert_timeout()
self.test_driver.check_cmd(
["string"], options=["--type-at-pos", "{root}foo_3.php:11:14"]
)
self.test_driver.stop_hh_loop_forever()
def test_status_single(self) -> None:
"""
Test hh_client check --single
"""
self.test_driver.start_hh_server()
with open(
os.path.join(self.test_driver.repo_dir, "typing_error.php"), "w"
) as f:
f.write("<?hh //strict\n function aaaa(): int { return h(); }")
self.test_driver.check_cmd(
[
"{root}typing_error.php:2:32,34: Invalid return type (Typing[4110])",
" {root}typing_error.php:2:19,21: Expected int",
" {root}foo_3.php:3:23,28: But got string",
],
options=["--single", "{root}typing_error.php"],
stdin="",
)
self.test_driver.check_cmd(
[
":2:32,34: Invalid return type (Typing[4110])",
" :2:19,21: Expected int",
" {root}foo_3.php:3:23,28: But got string",
],
options=["--single", "-"],
stdin="<?hh //strict\n function aaaa(): int { return h(); }",
)
def test_lint_xcontroller(self) -> None:
self.test_driver.start_hh_server()
with open(os.path.join(self.test_driver.repo_dir, "in_list.txt"), "w") as f:
f.write(os.path.join(self.test_driver.repo_dir, "xcontroller.php"))
with open(os.path.join(self.test_driver.repo_dir, "xcontroller.php"), "w") as f:
f.write(
"<?hh\n class MyXController extends XControllerBase { "
"public function getPath() { return f(); } }"
)
self.test_driver.check_cmd(
[
'File "{root}xcontroller.php", line 2, characters 8-20:',
"When linting MyXController: The body of isDelegateOnly should "
"only contain `return true;` or `return false;` (Lint[5615])",
'File "{root}xcontroller.php", line 2, characters 8-20:',
"When linting MyXController: getPath method of MyXController must "
"be present and return a static literal for build purposes (Lint[5615])",
],
options=["--lint-xcontroller", "{root}in_list.txt"],
)
def test_incremental_typecheck_same_file(self) -> None:
self.maxDiff = None
self.test_driver.start_hh_server()
# Important: typecheck the file after creation but before adding contents
# to test forward naming table updating.
open(
os.path.join(
self.test_driver.repo_dir, "test_incremental_typecheck_same_file.php"
),
"w",
).close()
self.test_driver.check_cmd(["No errors!"])
with open(
os.path.join(
self.test_driver.repo_dir, "test_incremental_typecheck_same_file.php"
),
"w",
) as f:
f.write(
"""<?hh // strict
// test_incremental_typecheck_same_file
class TestIncrementalTypecheckSameFile {}
"""
)
self.test_driver.check_cmd(["No errors!"])
# Notice how the only change is the removed doc block.
with open(
os.path.join(
self.test_driver.repo_dir, "test_incremental_typecheck_same_file.php"
),
"w",
) as f:
f.write(
"""<?hh // strict
class TestIncrementalTypecheckSameFile {}
"""
)
self.test_driver.check_cmd(["No errors!"])
| 35.174794
| 153
| 0.516401
|
08eaf5956f7e6e84ae3e71a81bc90b6a3aab5ce3
| 25,289
|
py
|
Python
|
sqlmodel/main.py
|
spawn08/sqlmodel
|
02697459b8ea25cee72e81239828139698a66ac2
|
[
"MIT"
] | null | null | null |
sqlmodel/main.py
|
spawn08/sqlmodel
|
02697459b8ea25cee72e81239828139698a66ac2
|
[
"MIT"
] | null | null | null |
sqlmodel/main.py
|
spawn08/sqlmodel
|
02697459b8ea25cee72e81239828139698a66ac2
|
[
"MIT"
] | null | null | null |
import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from pydantic import BaseModel
from pydantic.errors import ConfigError, DictError
from pydantic.fields import FieldInfo as PydanticFieldInfo
from pydantic.fields import ModelField, Undefined, UndefinedType
from pydantic.main import BaseConfig, ModelMetaclass, validate_model
from pydantic.typing import ForwardRef, NoArgAnyCallable, resolve_annotations
from pydantic.utils import ROOT_KEY, Representation
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy.orm import RelationshipProperty, declared_attr, registry, relationship
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from .sql.sqltypes import GUID, AutoString
_T = TypeVar("_T")
def __dataclass_transform__(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
) -> Callable[[_T], _T]:
return lambda a: a
class FieldInfo(PydanticFieldInfo):
def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
primary_key = kwargs.pop("primary_key", False)
nullable = kwargs.pop("nullable", Undefined)
foreign_key = kwargs.pop("foreign_key", Undefined)
index = kwargs.pop("index", Undefined)
sa_column = kwargs.pop("sa_column", Undefined)
sa_column_args = kwargs.pop("sa_column_args", Undefined)
sa_column_kwargs = kwargs.pop("sa_column_kwargs", Undefined)
if sa_column is not Undefined:
if sa_column_args is not Undefined:
raise RuntimeError(
"Passing sa_column_args is not supported when "
"also passing a sa_column"
)
if sa_column_kwargs is not Undefined:
raise RuntimeError(
"Passing sa_column_kwargs is not supported when "
"also passing a sa_column"
)
super().__init__(default=default, **kwargs)
self.primary_key = primary_key
self.nullable = nullable
self.foreign_key = foreign_key
self.index = index
self.sa_column = sa_column
self.sa_column_args = sa_column_args
self.sa_column_kwargs = sa_column_kwargs
class RelationshipInfo(Representation):
def __init__(
self,
*,
back_populates: Optional[str] = None,
link_model: Optional[Any] = None,
sa_relationship: Optional[RelationshipProperty] = None, # type: ignore
sa_relationship_args: Optional[Sequence[Any]] = None,
sa_relationship_kwargs: Optional[Mapping[str, Any]] = None,
) -> None:
if sa_relationship is not None:
if sa_relationship_args is not None:
raise RuntimeError(
"Passing sa_relationship_args is not supported when "
"also passing a sa_relationship"
)
if sa_relationship_kwargs is not None:
raise RuntimeError(
"Passing sa_relationship_kwargs is not supported when "
"also passing a sa_relationship"
)
self.back_populates = back_populates
self.link_model = link_model
self.sa_relationship = sa_relationship
self.sa_relationship_args = sa_relationship_args
self.sa_relationship_kwargs = sa_relationship_kwargs
def Field(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
exclude: Union[
AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
] = None,
include: Union[
AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
] = None,
const: Optional[bool] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
multiple_of: Optional[float] = None,
min_items: Optional[int] = None,
max_items: Optional[int] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
allow_mutation: bool = True,
regex: Optional[str] = None,
primary_key: bool = False,
foreign_key: Optional[Any] = None,
nullable: Union[bool, UndefinedType] = Undefined,
index: Union[bool, UndefinedType] = Undefined,
sa_column: Union[Column, UndefinedType] = Undefined, # type: ignore
sa_column_args: Union[Sequence[Any], UndefinedType] = Undefined,
sa_column_kwargs: Union[Mapping[str, Any], UndefinedType] = Undefined,
schema_extra: Optional[Dict[str, Any]] = None,
) -> Any:
current_schema_extra = schema_extra or {}
field_info = FieldInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclude=exclude,
include=include,
const=const,
gt=gt,
ge=ge,
lt=lt,
le=le,
multiple_of=multiple_of,
min_items=min_items,
max_items=max_items,
min_length=min_length,
max_length=max_length,
allow_mutation=allow_mutation,
regex=regex,
primary_key=primary_key,
foreign_key=foreign_key,
nullable=nullable,
index=index,
sa_column=sa_column,
sa_column_args=sa_column_args,
sa_column_kwargs=sa_column_kwargs,
**current_schema_extra,
)
field_info._validate()
return field_info
def Relationship(
*,
back_populates: Optional[str] = None,
link_model: Optional[Any] = None,
sa_relationship: Optional[RelationshipProperty] = None, # type: ignore
sa_relationship_args: Optional[Sequence[Any]] = None,
sa_relationship_kwargs: Optional[Mapping[str, Any]] = None,
) -> Any:
relationship_info = RelationshipInfo(
back_populates=back_populates,
link_model=link_model,
sa_relationship=sa_relationship,
sa_relationship_args=sa_relationship_args,
sa_relationship_kwargs=sa_relationship_kwargs,
)
return relationship_info
@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo))
class SQLModelMetaclass(ModelMetaclass, DeclarativeMeta):
__sqlmodel_relationships__: Dict[str, RelationshipInfo]
__config__: Type[BaseConfig]
__fields__: Dict[str, ModelField]
# Replicate SQLAlchemy
def __setattr__(cls, name: str, value: Any) -> None:
if getattr(cls.__config__, "table", False):
DeclarativeMeta.__setattr__(cls, name, value)
else:
super().__setattr__(name, value)
def __delattr__(cls, name: str) -> None:
if getattr(cls.__config__, "table", False):
DeclarativeMeta.__delattr__(cls, name)
else:
super().__delattr__(name)
# From Pydantic
def __new__(
cls,
name: str,
bases: Tuple[Type[Any], ...],
class_dict: Dict[str, Any],
**kwargs: Any,
) -> Any:
relationships: Dict[str, RelationshipInfo] = {}
dict_for_pydantic = {}
original_annotations = resolve_annotations(
class_dict.get("__annotations__", {}), class_dict.get("__module__", None)
)
pydantic_annotations = {}
relationship_annotations = {}
for k, v in class_dict.items():
if isinstance(v, RelationshipInfo):
relationships[k] = v
else:
dict_for_pydantic[k] = v
for k, v in original_annotations.items():
if k in relationships:
relationship_annotations[k] = v
else:
pydantic_annotations[k] = v
dict_used = {
**dict_for_pydantic,
"__weakref__": None,
"__sqlmodel_relationships__": relationships,
"__annotations__": pydantic_annotations,
}
        # Duplicate Pydantic's logic to filter config kwargs because, if they are
        # passed directly (including the registry), Pydantic will pass them on to
        # the superclass, causing an error
allowed_config_kwargs: Set[str] = {
key
for key in dir(BaseConfig)
if not (
key.startswith("__") and key.endswith("__")
) # skip dunder methods and attributes
}
pydantic_kwargs = kwargs.copy()
config_kwargs = {
key: pydantic_kwargs.pop(key)
for key in pydantic_kwargs.keys() & allowed_config_kwargs
}
new_cls = super().__new__(cls, name, bases, dict_used, **config_kwargs)
new_cls.__annotations__ = {
**relationship_annotations,
**pydantic_annotations,
**new_cls.__annotations__,
}
def get_config(name: str) -> Any:
config_class_value = getattr(new_cls.__config__, name, Undefined)
if config_class_value is not Undefined:
return config_class_value
kwarg_value = kwargs.get(name, Undefined)
if kwarg_value is not Undefined:
return kwarg_value
return Undefined
config_table = get_config("table")
if config_table is True:
# If it was passed by kwargs, ensure it's also set in config
new_cls.__config__.table = config_table
for k, v in new_cls.__fields__.items():
col = get_column_from_field(v)
setattr(new_cls, k, col)
# Set a config flag to tell FastAPI that this should be read with a field
# in orm_mode instead of preemptively converting it to a dict.
# This could be done by reading new_cls.__config__.table in FastAPI, but
            # that's very specific to SQLModel, so let's have another config that
# other future tools based on Pydantic can use.
new_cls.__config__.read_with_orm_mode = True
config_registry = get_config("registry")
if config_registry is not Undefined:
config_registry = cast(registry, config_registry)
# If it was passed by kwargs, ensure it's also set in config
            new_cls.__config__.registry = config_registry
setattr(new_cls, "_sa_registry", config_registry)
setattr(new_cls, "metadata", config_registry.metadata)
setattr(new_cls, "__abstract__", True)
return new_cls
# Override SQLAlchemy, allow both SQLAlchemy and plain Pydantic models
def __init__(
cls, classname: str, bases: Tuple[type, ...], dict_: Dict[str, Any], **kw: Any
) -> None:
# Only one of the base classes (or the current one) should be a table model
        # this allows FastAPI to clone a SQLModel for the response_model without
        # trying to create a new SQLAlchemy model for a new table with the same
        # name, which would trigger an error
base_is_table = False
for base in bases:
config = getattr(base, "__config__")
if config and getattr(config, "table", False):
base_is_table = True
break
if getattr(cls.__config__, "table", False) and not base_is_table:
dict_used = dict_.copy()
for field_name, field_value in cls.__fields__.items():
dict_used[field_name] = get_column_from_field(field_value)
for rel_name, rel_info in cls.__sqlmodel_relationships__.items():
if rel_info.sa_relationship:
                    # A SQLAlchemy relationship was declared explicitly; it takes
                    # precedence over anything else, so use it and continue with
                    # the next attribute
dict_used[rel_name] = rel_info.sa_relationship
continue
ann = cls.__annotations__[rel_name]
temp_field = ModelField.infer(
name=rel_name,
value=rel_info,
annotation=ann,
class_validators=None,
config=BaseConfig,
)
relationship_to = temp_field.type_
if isinstance(temp_field.type_, ForwardRef):
relationship_to = temp_field.type_.__forward_arg__
rel_kwargs: Dict[str, Any] = {}
if rel_info.back_populates:
rel_kwargs["back_populates"] = rel_info.back_populates
if rel_info.link_model:
ins = inspect(rel_info.link_model)
local_table = getattr(ins, "local_table")
if local_table is None:
raise RuntimeError(
"Couldn't find the secondary table for "
f"model {rel_info.link_model}"
)
rel_kwargs["secondary"] = local_table
rel_args: List[Any] = []
if rel_info.sa_relationship_args:
rel_args.extend(rel_info.sa_relationship_args)
if rel_info.sa_relationship_kwargs:
rel_kwargs.update(rel_info.sa_relationship_kwargs)
rel_value: RelationshipProperty = relationship( # type: ignore
relationship_to, *rel_args, **rel_kwargs
)
dict_used[rel_name] = rel_value
DeclarativeMeta.__init__(cls, classname, bases, dict_used, **kw)
else:
ModelMetaclass.__init__(cls, classname, bases, dict_, **kw)
def get_sqlachemy_type(field: ModelField) -> Any:
if issubclass(field.type_, str):
if field.field_info.max_length:
return AutoString(length=field.field_info.max_length)
return AutoString
if issubclass(field.type_, float):
return Float
if issubclass(field.type_, bool):
return Boolean
if issubclass(field.type_, int):
return Integer
if issubclass(field.type_, datetime):
return DateTime
if issubclass(field.type_, date):
return Date
if issubclass(field.type_, timedelta):
return Interval
if issubclass(field.type_, time):
return Time
if issubclass(field.type_, Enum):
return Enum
if issubclass(field.type_, bytes):
return LargeBinary
if issubclass(field.type_, Decimal):
return Numeric(
precision=getattr(field.type_, "max_digits", None),
scale=getattr(field.type_, "decimal_places", None),
)
if issubclass(field.type_, ipaddress.IPv4Address):
return AutoString
if issubclass(field.type_, ipaddress.IPv4Network):
return AutoString
if issubclass(field.type_, ipaddress.IPv6Address):
return AutoString
if issubclass(field.type_, ipaddress.IPv6Network):
return AutoString
if issubclass(field.type_, Path):
return AutoString
if issubclass(field.type_, uuid.UUID):
return GUID
def get_column_from_field(field: ModelField) -> Column: # type: ignore
sa_column = getattr(field.field_info, "sa_column", Undefined)
if isinstance(sa_column, Column):
return sa_column
sa_type = get_sqlachemy_type(field)
primary_key = getattr(field.field_info, "primary_key", False)
nullable = not field.required
index = getattr(field.field_info, "index", Undefined)
if index is Undefined:
index = True
if hasattr(field.field_info, "nullable"):
field_nullable = getattr(field.field_info, "nullable")
if field_nullable != Undefined:
nullable = field_nullable
args = []
foreign_key = getattr(field.field_info, "foreign_key", None)
if foreign_key:
args.append(ForeignKey(foreign_key))
kwargs = {
"primary_key": primary_key,
"nullable": nullable,
"index": index,
}
sa_default = Undefined
if field.field_info.default_factory:
sa_default = field.field_info.default_factory
elif field.field_info.default is not Undefined:
sa_default = field.field_info.default
if sa_default is not Undefined:
kwargs["default"] = sa_default
sa_column_args = getattr(field.field_info, "sa_column_args", Undefined)
if sa_column_args is not Undefined:
args.extend(list(cast(Sequence[Any], sa_column_args)))
sa_column_kwargs = getattr(field.field_info, "sa_column_kwargs", Undefined)
if sa_column_kwargs is not Undefined:
kwargs.update(cast(Dict[Any, Any], sa_column_kwargs))
return Column(sa_type, *args, **kwargs)
class_registry = weakref.WeakValueDictionary() # type: ignore
default_registry = registry()
def _value_items_is_true(v: Any) -> bool:
# Re-implement Pydantic's ValueItems.is_true() as it hasn't been released as of
# the current latest, Pydantic 1.8.2
return v is True or v is ...
_TSQLModel = TypeVar("_TSQLModel", bound="SQLModel")
class SQLModel(BaseModel, metaclass=SQLModelMetaclass, registry=default_registry):
# SQLAlchemy needs to set weakref(s), Pydantic will set the other slots values
__slots__ = ("__weakref__",)
__tablename__: ClassVar[Union[str, Callable[..., str]]]
__sqlmodel_relationships__: ClassVar[Dict[str, RelationshipProperty]] # type: ignore
__name__: ClassVar[str]
metadata: ClassVar[MetaData]
class Config:
orm_mode = True
def __new__(cls, *args: Any, **kwargs: Any) -> Any:
new_object = super().__new__(cls)
# SQLAlchemy doesn't call __init__ on the base class
# Ref: https://docs.sqlalchemy.org/en/14/orm/constructors.html
        # Set __fields_set__ here, which would normally be set when calling
        # __init__ on the Pydantic model, so that it already exists by the time
        # SQLAlchemy sets attributes (e.g. when querying from the DB) that get
        # added to __fields_set__
object.__setattr__(new_object, "__fields_set__", set())
return new_object
def __init__(__pydantic_self__, **data: Any) -> None:
        # Uses something other than `self` as the first arg to allow "self" as a
        # settable attribute
if TYPE_CHECKING:
__pydantic_self__.__dict__: Dict[str, Any] = {}
__pydantic_self__.__fields_set__: Set[str] = set()
values, fields_set, validation_error = validate_model(
__pydantic_self__.__class__, data
)
# Only raise errors if not a SQLModel model
if (
not getattr(__pydantic_self__.__config__, "table", False)
and validation_error
):
raise validation_error
# Do not set values as in Pydantic, pass them through setattr, so SQLAlchemy
# can handle them
# object.__setattr__(__pydantic_self__, '__dict__', values)
object.__setattr__(__pydantic_self__, "__fields_set__", fields_set)
for key, value in values.items():
setattr(__pydantic_self__, key, value)
non_pydantic_keys = data.keys() - values.keys()
for key in non_pydantic_keys:
if key in __pydantic_self__.__sqlmodel_relationships__:
setattr(__pydantic_self__, key, data[key])
def __setattr__(self, name: str, value: Any) -> None:
if name in {"_sa_instance_state"}:
self.__dict__[name] = value
return
else:
# Set in SQLAlchemy, before Pydantic to trigger events and updates
if getattr(self.__config__, "table", False):
if is_instrumented(self, name):
set_attribute(self, name, value)
# Set in Pydantic model to trigger possible validation changes, only for
# non relationship values
if name not in self.__sqlmodel_relationships__:
super().__setattr__(name, value)
@classmethod
def from_orm(
cls: Type[_TSQLModel], obj: Any, update: Optional[Dict[str, Any]] = None
) -> _TSQLModel:
# Duplicated from Pydantic
if not cls.__config__.orm_mode:
raise ConfigError(
"You must have the config attribute orm_mode=True to use from_orm"
)
obj = {ROOT_KEY: obj} if cls.__custom_root_type__ else cls._decompose_class(obj)
# SQLModel, support update dict
if update is not None:
obj = {**obj, **update}
# End SQLModel support dict
if not getattr(cls.__config__, "table", False):
# If not table, normal Pydantic code
m: _TSQLModel = cls.__new__(cls)
else:
# If table, create the new instance normally to make SQLAlchemy create
# the _sa_instance_state attribute
m = cls()
values, fields_set, validation_error = validate_model(cls, obj)
if validation_error:
raise validation_error
# Updated to trigger SQLAlchemy internal handling
if not getattr(cls.__config__, "table", False):
object.__setattr__(m, "__dict__", values)
else:
for key, value in values.items():
setattr(m, key, value)
# Continue with standard Pydantic logic
object.__setattr__(m, "__fields_set__", fields_set)
m._init_private_attributes()
return m
@classmethod
def parse_obj(
cls: Type["SQLModel"], obj: Any, update: Optional[Dict[str, Any]] = None
) -> "SQLModel":
obj = cls._enforce_dict_if_root(obj)
# SQLModel, support update dict
if update is not None:
obj = {**obj, **update}
# End SQLModel support dict
return super().parse_obj(obj)
def __repr_args__(self) -> Sequence[Tuple[Optional[str], Any]]:
# Don't show SQLAlchemy private attributes
return [(k, v) for k, v in self.__dict__.items() if not k.startswith("_sa_")]
# From Pydantic, override to enforce validation with dict
@classmethod
def validate(cls: Type["SQLModel"], value: Any) -> "SQLModel":
if isinstance(value, cls):
return value.copy() if cls.__config__.copy_on_model_validation else value
value = cls._enforce_dict_if_root(value)
if isinstance(value, dict):
values, fields_set, validation_error = validate_model(cls, value)
if validation_error:
raise validation_error
model = cls(**values)
# Reset fields set, this would have been done in Pydantic in __init__
object.__setattr__(model, "__fields_set__", fields_set)
return model
elif cls.__config__.orm_mode:
return cls.from_orm(value)
elif cls.__custom_root_type__:
return cls.parse_obj(value)
else:
try:
value_as_dict = dict(value)
except (TypeError, ValueError) as e:
raise DictError() from e
return cls(**value_as_dict)
# From Pydantic, override to only show keys from fields, omit SQLAlchemy attributes
def _calculate_keys( # type: ignore
self,
include: Optional[Mapping[Union[int, str], Any]],
exclude: Optional[Mapping[Union[int, str], Any]],
exclude_unset: bool,
update: Optional[Dict[str, Any]] = None,
) -> Optional[AbstractSet[str]]:
if include is None and exclude is None and exclude_unset is False:
# Original in Pydantic:
# return None
# Updated to not return SQLAlchemy attributes
# Do not include relationships as that would easily lead to infinite
# recursion, or traversing the whole database
return self.__fields__.keys() # | self.__sqlmodel_relationships__.keys()
keys: AbstractSet[str]
if exclude_unset:
keys = self.__fields_set__.copy()
else:
# Original in Pydantic:
# keys = self.__dict__.keys()
# Updated to not return SQLAlchemy attributes
# Do not include relationships as that would easily lead to infinite
# recursion, or traversing the whole database
keys = self.__fields__.keys() # | self.__sqlmodel_relationships__.keys()
if include is not None:
keys &= include.keys()
if update:
keys -= update.keys()
if exclude:
keys -= {k for k, v in exclude.items() if _value_items_is_true(v)}
return keys
@declared_attr # type: ignore
def __tablename__(cls) -> str:
return cls.__name__.lower()
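

# The block below is an illustrative usage sketch, not part of the module above:
# it shows how SQLModel, Field(primary_key=..., foreign_key=...) and
# Relationship(back_populates=...) defined in this file are meant to be combined.
# The Team/Hero model names and field values are assumptions made for the example.
if __name__ == "__main__":
    class Team(SQLModel, table=True):
        # `table=True` is picked up by SQLModelMetaclass and turns the class into
        # a SQLAlchemy table model registered in `default_registry`.
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str
        heroes: List["Hero"] = Relationship(back_populates="team")

    class Hero(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str
        # `foreign_key` becomes a ForeignKey("team.id") positional argument of the
        # Column built in get_column_from_field().
        team_id: Optional[int] = Field(default=None, foreign_key="team.id")
        team: Optional[Team] = Relationship(back_populates="heroes")

    hero = Hero(name="Deadpond")
    print(hero.dict())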
| 38.84639
| 89
| 0.629444
|
a0af4bb3452207c8a415525c6a5d4b71c8bb2a95
| 2,086
|
py
|
Python
|
pimdm/tree/originator.py
|
leoplo/pim_dm
|
e097fb8e247b14f142b6aa97d8ee34440aeba806
|
[
"MIT"
] | 6
|
2020-02-04T20:59:59.000Z
|
2021-11-24T09:56:07.000Z
|
pimdm/tree/originator.py
|
leoplo/pim_dm
|
e097fb8e247b14f142b6aa97d8ee34440aeba806
|
[
"MIT"
] | 4
|
2020-04-10T14:51:39.000Z
|
2022-02-14T00:59:21.000Z
|
pimdm/tree/originator.py
|
leoplo/pim_dm
|
e097fb8e247b14f142b6aa97d8ee34440aeba806
|
[
"MIT"
] | 3
|
2020-08-13T17:56:35.000Z
|
2021-11-24T11:03:12.000Z
|
from abc import ABCMeta, abstractmethod
class OriginatorStateABC(metaclass=ABCMeta):
@staticmethod
@abstractmethod
def recvDataMsgFromSource(tree):
pass
@staticmethod
@abstractmethod
def SRTexpires(tree):
pass
@staticmethod
@abstractmethod
def SATexpires(tree):
pass
@staticmethod
@abstractmethod
def SourceNotConnected(tree):
pass
class Originator(OriginatorStateABC):
@staticmethod
def recvDataMsgFromSource(tree):
tree.set_source_active_timer()
@staticmethod
def SRTexpires(tree):
'''
@type tree: Tree
'''
tree.originator_logger.debug('SRT expired, O -> O')
tree.set_state_refresh_timer()
tree.create_state_refresh_msg()
@staticmethod
def SATexpires(tree):
tree.originator_logger.debug('SAT expired, O -> NO')
tree.clear_state_refresh_timer()
tree.set_originator_state(OriginatorState.NotOriginator)
@staticmethod
def SourceNotConnected(tree):
tree.originator_logger.debug('Source no longer directly connected, O -> NO')
tree.clear_state_refresh_timer()
tree.clear_source_active_timer()
tree.set_originator_state(OriginatorState.NotOriginator)
def __str__(self):
return 'Originator'
class NotOriginator(OriginatorStateABC):
@staticmethod
def recvDataMsgFromSource(tree):
'''
@type interface: Tree
'''
tree.originator_logger.debug('new DataMsg from Source, NO -> O')
tree.set_originator_state(OriginatorState.Originator)
tree.set_state_refresh_timer()
tree.set_source_active_timer()
@staticmethod
def SRTexpires(tree):
assert False, "SRTexpires in NO"
@staticmethod
def SATexpires(tree):
assert False, "SATexpires in NO"
@staticmethod
def SourceNotConnected(tree):
return
def __str__(self):
return 'NotOriginator'
class OriginatorState():
NotOriginator = NotOriginator()
Originator = Originator()
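

# Illustrative sketch only (not part of the original module): a minimal stand-in
# for the Tree class that these state objects expect. The attribute and method
# names on the stub are assumptions inferred from the calls made above; the real
# Tree class lives elsewhere in pim_dm.
if __name__ == "__main__":
    import logging

    class _FakeTree:
        originator_logger = logging.getLogger("originator")

        def __init__(self):
            self.state = OriginatorState.NotOriginator

        def set_originator_state(self, state):
            self.state = state

        # Timer and message hooks are no-ops for this sketch.
        def set_state_refresh_timer(self):
            pass

        def set_source_active_timer(self):
            pass

        def clear_state_refresh_timer(self):
            pass

        def clear_source_active_timer(self):
            pass

        def create_state_refresh_msg(self):
            pass

    tree = _FakeTree()
    tree.state.recvDataMsgFromSource(tree)   # NotOriginator -> Originator
    print(tree.state)                        # "Originator"
    tree.state.SourceNotConnected(tree)      # Originator -> NotOriginator
    print(tree.state)                        # "NotOriginator"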
| 23.977011
| 84
| 0.668744
|
cc74887466b092d6f472dbf4f02918a07c1f10ed
| 359
|
py
|
Python
|
plottwist/toolsets/__init__.py
|
Plot-Twist-Short-Film/plottwist-core
|
44cc770c83602004f3bdf5cd0027a9d189ae718d
|
[
"MIT"
] | null | null | null |
plottwist/toolsets/__init__.py
|
Plot-Twist-Short-Film/plottwist-core
|
44cc770c83602004f3bdf5cd0027a9d189ae718d
|
[
"MIT"
] | 1
|
2020-03-08T20:24:43.000Z
|
2020-03-08T20:24:43.000Z
|
plottwist/toolsets/__init__.py
|
Plot-Twist-Short-Film/plottwist-core
|
44cc770c83602004f3bdf5cd0027a9d189ae718d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Initialization module for plottwist-toolsets
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| 19.944444
| 64
| 0.754875
|
6841c240e7cd31af16ae409516f83cef40efa953
| 922
|
py
|
Python
|
example/project/chat/sockserver.py
|
NextGear/django-sockjs-tornado
|
dd52eec942f7fa457ed331fffaeee294f888bd1f
|
[
"MIT"
] | 25
|
2015-01-11T04:35:08.000Z
|
2020-07-12T08:51:47.000Z
|
example/project/chat/sockserver.py
|
NextGear/django-sockjs-tornado
|
dd52eec942f7fa457ed331fffaeee294f888bd1f
|
[
"MIT"
] | null | null | null |
example/project/chat/sockserver.py
|
NextGear/django-sockjs-tornado
|
dd52eec942f7fa457ed331fffaeee294f888bd1f
|
[
"MIT"
] | 4
|
2015-06-19T11:52:14.000Z
|
2017-09-30T08:52:56.000Z
|
import json
from sockjs.tornado import SockJSConnection
from .models import Message
class ChatConnection(SockJSConnection):
_connected = set()
def on_open(self, request):
#print "OPEN"
#print request.get_cookie('name')
self._connected.add(self)
for each in Message.objects.all().order_by('date')[:10]:
self.send(self._package_message(each))
def on_message(self, data):
data = json.loads(data)
#print "DATA", repr(data)
msg = Message.objects.create(
name=data['name'],
message=data['message']
)
self.broadcast(self._connected, self._package_message(msg))
def on_close(self):
#print "CLOSE"
self._connected.remove(self)
def _package_message(self, m):
return {'date': m.date.strftime('%H:%M:%S'),
'message': m.message,
'name': m.name}
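

# Illustrative wiring sketch (not part of the original module): how this
# connection class is typically mounted with sockjs-tornado. It assumes Django
# settings are already configured so the Message model can be imported; the
# '/chat' prefix and port 8080 are placeholder choices.
if __name__ == "__main__":
    import tornado.ioloop
    import tornado.web
    from sockjs.tornado import SockJSRouter

    chat_router = SockJSRouter(ChatConnection, '/chat')
    app = tornado.web.Application(chat_router.urls)
    app.listen(8080)
    tornado.ioloop.IOLoop.instance().start()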
| 27.939394
| 67
| 0.596529
|
06b478cf3e6bee5c25855be86545c6caf1f45886
| 515
|
py
|
Python
|
api/views.py
|
RRakib/django-boilerplate
|
55c07384202643613e29342eb387e4a9a661c806
|
[
"Apache-2.0"
] | 1
|
2021-08-16T10:53:03.000Z
|
2021-08-16T10:53:03.000Z
|
api/views.py
|
Ahnaf/myproject
|
86b1c4b56f7df30a1c38e5ba6c71255d4bc65b4b
|
[
"Apache-2.0"
] | 9
|
2020-03-24T17:46:49.000Z
|
2021-08-23T20:27:47.000Z
|
api/views.py
|
Ahnaf/myproject
|
86b1c4b56f7df30a1c38e5ba6c71255d4bc65b4b
|
[
"Apache-2.0"
] | 1
|
2021-08-16T10:53:04.000Z
|
2021-08-16T10:53:04.000Z
|
from rest_framework import viewsets
from .models import DemoPurpose
from .serializers import DemoSerializer
class DemoViewSet(viewsets.ModelViewSet):
queryset = DemoPurpose.objects.all()
serializer_class = DemoSerializer
def get_serializer(self, *args, **kwargs):
if "data" in kwargs:
data = kwargs["data"]
# check if many is required
if isinstance(data, list):
kwargs["many"] = True
return super(DemoViewSet, self).get_serializer(*args, **kwargs)
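

# Illustrative wiring sketch, kept in comments because this normally belongs in
# a urls.py rather than in this views module. The "demo" prefix and basename are
# placeholders, not part of the original project.
#
#   from rest_framework.routers import DefaultRouter
#   from .views import DemoViewSet
#
#   router = DefaultRouter()
#   router.register(r"demo", DemoViewSet, basename="demo")
#   urlpatterns = router.urls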
| 27.105263
| 68
| 0.685437
|
370066678d5dff42c8934c3485fa972c403b1375
| 51
|
py
|
Python
|
autox/autox_recommend/recall_and_rank/ranker/__init__.py
|
OneToolsCollection/4paradigm-AutoX
|
f8e838021354de17f5bb9bc44e9d68d12dda6427
|
[
"Apache-2.0"
] | null | null | null |
autox/autox_recommend/recall_and_rank/ranker/__init__.py
|
OneToolsCollection/4paradigm-AutoX
|
f8e838021354de17f5bb9bc44e9d68d12dda6427
|
[
"Apache-2.0"
] | null | null | null |
autox/autox_recommend/recall_and_rank/ranker/__init__.py
|
OneToolsCollection/4paradigm-AutoX
|
f8e838021354de17f5bb9bc44e9d68d12dda6427
|
[
"Apache-2.0"
] | null | null | null |
from .ranker import ranker, ranker_test, inference
| 25.5
| 50
| 0.823529
|
b296ffd2e4d63c5de4d06ef5fc0dbe27f16ac3d1
| 1,082
|
py
|
Python
|
tests/test_build/test_combine.py
|
mgedmin/terminaltables
|
ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc
|
[
"MIT"
] | 742
|
2015-01-03T21:46:14.000Z
|
2022-03-27T05:49:32.000Z
|
tests/test_build/test_combine.py
|
mgedmin/terminaltables
|
ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc
|
[
"MIT"
] | 64
|
2015-01-06T01:34:12.000Z
|
2020-05-07T21:52:11.000Z
|
tests/test_build/test_combine.py
|
mgedmin/terminaltables
|
ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc
|
[
"MIT"
] | 96
|
2015-02-26T16:42:42.000Z
|
2022-02-06T14:00:24.000Z
|
"""Test function in module."""
import pytest
from terminaltables.build import combine
@pytest.mark.parametrize('generator', [False, True])
def test_borders(generator):
"""Test with borders.
:param bool generator: Test with generator instead of list.
"""
line = ['One', 'Two', 'Three']
actual = list(combine(iter(line) if generator else line, '>', '|', '<'))
assert actual == ['>', 'One', '|', 'Two', '|', 'Three', '<']
@pytest.mark.parametrize('generator', [False, True])
def test_no_border(generator):
"""Test without borders.
:param bool generator: Test with generator instead of list.
"""
line = ['One', 'Two', 'Three']
actual = list(combine(iter(line) if generator else line, '', '', ''))
assert actual == ['One', 'Two', 'Three']
@pytest.mark.parametrize('generator', [False, True])
def test_no_items(generator):
"""Test with empty list.
:param bool generator: Test with generator instead of list.
"""
actual = list(combine(iter([]) if generator else [], '>', '|', '<'))
assert actual == ['>', '<']
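

# Illustrative sketch (not part of the original test module): what combine()
# produces when joining cell strings for one table row; ''.join() turns the
# yielded pieces into the final rendered line.
if __name__ == "__main__":
    row = ''.join(combine(['One', 'Two', 'Three'], '|', '|', '|'))
    print(row)  # |One|Two|Three|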
| 28.473684
| 76
| 0.615527
|
f563cc8e30bd9a21a47284297ee1bc28b4da0fa4
| 8,922
|
py
|
Python
|
isi_sdk_8_2_2/isi_sdk_8_2_2/models/auth_access_access_item_share_share_permissions.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_2/isi_sdk_8_2_2/models/auth_access_access_item_share_share_permissions.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_2/isi_sdk_8_2_2/models/auth_access_access_item_share_share_permissions.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.auth_access_access_item_share_share_permissions_share_relevant_ace import AuthAccessAccessItemShareSharePermissionsShareRelevantAce # noqa: F401,E501
class AuthAccessAccessItemShareSharePermissions(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'expected_permissions': 'str',
'impersonate_guest': 'bool',
'impersonate_user': 'bool',
'run_as_root': 'bool',
'share_relevant_aces': 'list[AuthAccessAccessItemShareSharePermissionsShareRelevantAce]'
}
attribute_map = {
'expected_permissions': 'expected_permissions',
'impersonate_guest': 'impersonate_guest',
'impersonate_user': 'impersonate_user',
'run_as_root': 'run_as_root',
'share_relevant_aces': 'share_relevant_aces'
}
def __init__(self, expected_permissions=None, impersonate_guest=None, impersonate_user=None, run_as_root=None, share_relevant_aces=None): # noqa: E501
"""AuthAccessAccessItemShareSharePermissions - a model defined in Swagger""" # noqa: E501
self._expected_permissions = None
self._impersonate_guest = None
self._impersonate_user = None
self._run_as_root = None
self._share_relevant_aces = None
self.discriminator = None
if expected_permissions is not None:
self.expected_permissions = expected_permissions
if impersonate_guest is not None:
self.impersonate_guest = impersonate_guest
if impersonate_user is not None:
self.impersonate_user = impersonate_user
if run_as_root is not None:
self.run_as_root = run_as_root
if share_relevant_aces is not None:
self.share_relevant_aces = share_relevant_aces
@property
def expected_permissions(self):
"""Gets the expected_permissions of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
Returns Share level permissions for the user.{ 'read' , 'write' , 'full' or 'none' will be the values} # noqa: E501
:return: The expected_permissions of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:rtype: str
"""
return self._expected_permissions
@expected_permissions.setter
def expected_permissions(self, expected_permissions):
"""Sets the expected_permissions of this AuthAccessAccessItemShareSharePermissions.
Returns Share level permissions for the user.{ 'read' , 'write' , 'full' or 'none' will be the values} # noqa: E501
:param expected_permissions: The expected_permissions of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:type: str
"""
if expected_permissions is not None and len(expected_permissions) > 255:
raise ValueError("Invalid value for `expected_permissions`, length must be less than or equal to `255`") # noqa: E501
if expected_permissions is not None and len(expected_permissions) < 0:
raise ValueError("Invalid value for `expected_permissions`, length must be greater than or equal to `0`") # noqa: E501
self._expected_permissions = expected_permissions
@property
def impersonate_guest(self):
"""Gets the impersonate_guest of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
Returns whether impersonate guest setting is enabled for the user on the share. # noqa: E501
:return: The impersonate_guest of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:rtype: bool
"""
return self._impersonate_guest
@impersonate_guest.setter
def impersonate_guest(self, impersonate_guest):
"""Sets the impersonate_guest of this AuthAccessAccessItemShareSharePermissions.
Returns whether impersonate guest setting is enabled for the user on the share. # noqa: E501
:param impersonate_guest: The impersonate_guest of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:type: bool
"""
self._impersonate_guest = impersonate_guest
@property
def impersonate_user(self):
"""Gets the impersonate_user of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
Returns whether impersonate user setting is enabled on the share # noqa: E501
:return: The impersonate_user of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:rtype: bool
"""
return self._impersonate_user
@impersonate_user.setter
def impersonate_user(self, impersonate_user):
"""Sets the impersonate_user of this AuthAccessAccessItemShareSharePermissions.
Returns whether impersonate user setting is enabled on the share # noqa: E501
:param impersonate_user: The impersonate_user of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:type: bool
"""
self._impersonate_user = impersonate_user
@property
def run_as_root(self):
"""Gets the run_as_root of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
Returns whether run as root is enabled for the user on the share # noqa: E501
:return: The run_as_root of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:rtype: bool
"""
return self._run_as_root
@run_as_root.setter
def run_as_root(self, run_as_root):
"""Sets the run_as_root of this AuthAccessAccessItemShareSharePermissions.
Returns whether run as root is enabled for the user on the share # noqa: E501
:param run_as_root: The run_as_root of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:type: bool
"""
self._run_as_root = run_as_root
@property
def share_relevant_aces(self):
"""Gets the share_relevant_aces of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
        Specifies a list of the relevant Access Control Entries with respect to the user in the share.  # noqa: E501
:return: The share_relevant_aces of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:rtype: list[AuthAccessAccessItemShareSharePermissionsShareRelevantAce]
"""
return self._share_relevant_aces
@share_relevant_aces.setter
def share_relevant_aces(self, share_relevant_aces):
"""Sets the share_relevant_aces of this AuthAccessAccessItemShareSharePermissions.
        Specifies a list of the relevant Access Control Entries with respect to the user in the share.  # noqa: E501
:param share_relevant_aces: The share_relevant_aces of this AuthAccessAccessItemShareSharePermissions. # noqa: E501
:type: list[AuthAccessAccessItemShareSharePermissionsShareRelevantAce]
"""
self._share_relevant_aces = share_relevant_aces
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuthAccessAccessItemShareSharePermissions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
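

# Illustrative sketch (not part of the generated module): constructing one of
# these permission objects directly and serializing it. The values below are
# placeholders chosen for the example.
if __name__ == "__main__":
    perms = AuthAccessAccessItemShareSharePermissions(
        expected_permissions="read",
        impersonate_guest=False,
        run_as_root=False,
    )
    print(perms.to_str())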
| 38.291845
| 176
| 0.676754
|
82621e66f64a2685b75911a75f18dd3ecb408f38
| 805
|
py
|
Python
|
{{cookiecutter.repo_name}}/project/apps/geo_locator/tests.py
|
mindcube/mindcube-django-cookiecutter
|
8ac8a713c1b091c69488c0e545d066da19154cc5
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/project/apps/geo_locator/tests.py
|
mindcube/mindcube-django-cookiecutter
|
8ac8a713c1b091c69488c0e545d066da19154cc5
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/project/apps/geo_locator/tests.py
|
mindcube/mindcube-django-cookiecutter
|
8ac8a713c1b091c69488c0e545d066da19154cc5
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.test import Client
class SimpleTest(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
def test_details(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_geolocation_real_ip(self):
# test with a known ip address
response = self.client.get('/', REMOTE_ADDR='172.249.173.233')
self.assertEqual(response.context['location']['latitude'], 34.1125)
self.assertEqual(response.context['location']['longitude'], -118.1908)
def test_geolocation_local_ip(self):
# test with local IP
response = self.client.get('/', REMOTE_ADDR='127.0.0.1')
self.assertIsNone(response.context['location'])
| 29.814815
| 78
| 0.659627
|
5adbc7721de1bd6037e782a77cc228ea82bc80e5
| 8,755
|
py
|
Python
|
tests/test_params_set.py
|
FaustinCarter/lmfit-py
|
7fbb75b2fd3f383e78692fd85c9a646793d4b071
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_params_set.py
|
FaustinCarter/lmfit-py
|
7fbb75b2fd3f383e78692fd85c9a646793d4b071
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_params_set.py
|
FaustinCarter/lmfit-py
|
7fbb75b2fd3f383e78692fd85c9a646793d4b071
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from numpy.testing import assert_allclose
from lmfit import Parameters, minimize, report_fit
from lmfit.lineshapes import gaussian
from lmfit.models import VoigtModel
def test_param_set():
np.random.seed(2015)
x = np.arange(0, 20, 0.05)
y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13)
y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x))
model = VoigtModel()
params = model.guess(y, x=x)
# test #1: gamma is constrained to equal sigma
assert(params['gamma'].expr == 'sigma')
params.update_constraints()
sigval = params['sigma'].value
assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)
# test #2: explicitly setting a param value should work, even when
# it had been an expression. The value will be left as fixed
gamval = 0.87543
params['gamma'].set(value=gamval)
assert(params['gamma'].expr is None)
assert(not params['gamma'].vary)
assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
# test #3: explicitly setting an expression should work
# Note, the only way to ensure that **ALL** constraints are up to date
# is to call params.update_constraints(). This is because the constraint
# may have multiple dependencies.
params['gamma'].set(expr='sigma/2.0')
assert(params['gamma'].expr is not None)
assert(not params['gamma'].vary)
params.update_constraints()
assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True)
# test #4: explicitly setting a param value WITH vary=True
# will set it to be variable
gamval = 0.7777
params['gamma'].set(value=gamval, vary=True)
assert(params['gamma'].expr is None)
assert(params['gamma'].vary)
assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
# test 5: make sure issue #389 is fixed: set boundaries and make sure
# they are kept when changing the value
amplitude_vary = params['amplitude'].vary
amplitude_expr = params['amplitude'].expr
params['amplitude'].set(min=0.0, max=100.0)
params.update_constraints()
assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
params['amplitude'].set(value=40.0)
params.update_constraints()
assert_allclose(params['amplitude'].value, 40.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
assert(params['amplitude'].expr == amplitude_expr)
assert(params['amplitude'].vary == amplitude_vary)
assert(not params['amplitude'].brute_step)
# test for possible regressions of this fix (without 'expr'):
# the set function should only change the requested attribute(s)
params['amplitude'].set(value=35.0)
params.update_constraints()
assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
assert(params['amplitude'].vary == amplitude_vary)
assert(params['amplitude'].expr == amplitude_expr)
assert(not params['amplitude'].brute_step)
# set minimum
params['amplitude'].set(min=10.0)
params.update_constraints()
assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
assert(params['amplitude'].vary == amplitude_vary)
assert(params['amplitude'].expr == amplitude_expr)
assert(not params['amplitude'].brute_step)
# set maximum
params['amplitude'].set(max=110.0)
params.update_constraints()
assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
assert(params['amplitude'].vary == amplitude_vary)
assert(params['amplitude'].expr == amplitude_expr)
assert(not params['amplitude'].brute_step)
# set vary
params['amplitude'].set(vary=False)
params.update_constraints()
assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
assert(params['amplitude'].vary == False)
assert(params['amplitude'].expr == amplitude_expr)
assert(not params['amplitude'].brute_step)
# set brute_step
params['amplitude'].set(brute_step=0.1)
params.update_constraints()
assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
assert(params['amplitude'].vary == False)
assert(params['amplitude'].expr == amplitude_expr)
assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True)
# test for possible regressions of this fix for variables WITH 'expr':
height_value = params['height'].value
height_min = params['height'].min
height_max = params['height'].max
height_vary = params['height'].vary
height_expr = params['height'].expr
height_brute_step = params['height'].brute_step
# set vary=True should remove expression
params['height'].set(vary=True)
params.update_constraints()
assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
assert(params['height'].vary == True)
assert(params['height'].expr == None)
assert(params['height'].brute_step == height_brute_step)
# setting an expression should set vary=False
params['height'].set(expr=height_expr)
params.update_constraints()
assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
assert(params['height'].vary == False)
assert(params['height'].expr == height_expr)
assert(params['height'].brute_step == height_brute_step)
# changing min/max should not remove expression
params['height'].set(min=0)
params.update_constraints()
assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
assert(params['height'].vary == height_vary)
assert(params['height'].expr == height_expr)
assert(params['height'].brute_step == height_brute_step)
# changing brute_step should not remove expression
params['height'].set(brute_step=0.1)
params.update_constraints()
assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
assert(params['height'].vary == height_vary)
assert(params['height'].expr == height_expr)
    assert_allclose(params['height'].brute_step, 0.1, 1e-4, 1e-4, '', True)
# changing the value should remove expression and keep vary=False
params['height'].set(brute_step=0)
params['height'].set(value=10.0)
params.update_constraints()
assert_allclose(params['height'].value, 10.0, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
assert(params['height'].vary == False)
assert(params['height'].expr == None)
assert(params['height'].brute_step == height_brute_step)
# passing expr='' should only remove the expression
params['height'].set(expr=height_expr) # first restore the original expr
params.update_constraints()
params['height'].set(expr='')
params.update_constraints()
assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
assert(params['height'].vary == False)
assert(params['height'].expr == None)
assert(params['height'].brute_step == height_brute_step)
test_param_set()
| 46.569149
| 79
| 0.671845
|
8a03db01273afac0956c892af20b3c57b47a3085
| 9,806
|
py
|
Python
|
src/visual_data_handlers.py
|
nickgkan/beauty_detr
|
502ee85e35c0a8a488fc71bb301e8c5bb1827250
|
[
"MIT"
] | 18
|
2021-12-16T04:41:55.000Z
|
2022-02-24T12:53:22.000Z
|
src/visual_data_handlers.py
|
nickgkan/beauty_detr
|
502ee85e35c0a8a488fc71bb301e8c5bb1827250
|
[
"MIT"
] | null | null | null |
src/visual_data_handlers.py
|
nickgkan/beauty_detr
|
502ee85e35c0a8a488fc71bb301e8c5bb1827250
|
[
"MIT"
] | 1
|
2021-12-19T12:53:28.000Z
|
2021-12-19T12:53:28.000Z
|
# ------------------------------------------------------------------------
# BEAUTY DETR
# Copyright (c) 2022 Ayush Jain & Nikolaos Gkanatsios
# Licensed under CC-BY-NC [see LICENSE for details]
# All Rights Reserved
# ------------------------------------------------------------------------
"""Classes for ScanNet datasets."""
from collections import defaultdict
import json
import os.path as osp
import numpy as np
from plyfile import PlyData
class ScanNetMappings:
"""Holds ScanNet dataset mappings."""
def __init__(self):
"""Load ScanNet files for classes/rotations/etc."""
folder = 'data/meta_data/'
with open(folder + 'scannet_idx_to_semantic_class.json') as fid:
self.idx_to_semantic_cls_dict = json.load(fid)
self.semantic_cls_to_idx_dict = {
v: k for k, v in self.idx_to_semantic_cls_dict.items()
}
with open(
folder + 'scannet_instance_class_to_semantic_class.json'
) as fid:
self.instance_cls_to_semantic_cls_dict = json.load(fid)
with open(folder + 'scans_axis_alignment_matrices.json') as fid:
self.scans_axis_alignment_mats = json.load(fid)
def idx_to_semantic_cls(self, semantic_idx):
"""
Return class name given class index.
{'1': 'wall', '2': 'floor'}
"""
return self.idx_to_semantic_cls_dict[str(semantic_idx)]
def semantic_cls_to_idx(self, semantic_cls):
"""
Return class index given class name.
{'wall': '1', 'floor': '2'}
"""
return self.semantic_cls_to_idx_dict[str(semantic_cls)]
def instance_cls_to_semantic_cls(self, instance_cls):
"""
Return super-class name given class name.
{'air hockey table': 'table', 'airplane': 'otherprop'}
"""
return self.instance_cls_to_semantic_cls_dict[str(instance_cls)]
def get_axis_alignment_matrix(self, scan_id):
"""
        Return axis alignment matrix given scan id.
{'scan_id': rotation matrix}
"""
return np.array(self.scans_axis_alignment_mats[scan_id]).reshape(4, 4)
class Scan:
"""Scan class for ScanNet."""
def __init__(self, scan_id, top_scan_dir, load_objects=True):
"""Initialize for given scan_id, mappings and ScanNet path."""
self.mappings = ScanNetMappings()
self.scan_id = scan_id
self.top_scan_dir = top_scan_dir
self.choices = None
self.pc, self.semantic_label_idx, self.color = self.load_point_cloud()
self.orig_pc = np.copy(self.pc) # this won't be augmented
self.three_d_objects = None # will save a list of objects here
if load_objects:
self.load_point_clouds_of_all_objects()
def load_point_cloud(self, keep_points=50000):
"""Load point-cloud information."""
# Load labels
label = None
        label_path = osp.join(
            self.top_scan_dir,
            self.scan_id, self.scan_id + '_vh_clean_2.labels.ply'
        )
        if osp.exists(label_path):
            data = PlyData.read(label_path)
label = np.asarray(data.elements[0].data['label'])
# Load points and color
data = PlyData.read(osp.join(
self.top_scan_dir,
self.scan_id, self.scan_id + '_vh_clean_2.ply'
))
pc = np.stack([
np.asarray(data.elements[0].data['x']),
np.asarray(data.elements[0].data['y']),
np.asarray(data.elements[0].data['z'])
], axis=1)
pc = self.align_to_axes(pc) # global alignment of the scan
color = (np.stack([
np.asarray(data.elements[0].data['red']),
np.asarray(data.elements[0].data['green']),
np.asarray(data.elements[0].data['blue'])
], axis=1) / 256.0).astype(np.float32)
# Keep a specific number of points
np.random.seed(1184)
choices = np.random.choice(
pc.shape[0],
keep_points,
replace=len(pc) < keep_points
)
self.choices = choices
self.new_pts = np.zeros(len(pc)).astype(int)
self.new_pts[choices] = np.arange(len(choices)).astype(int)
pc = pc[choices]
if label is not None:
label = label[choices]
color = color[choices]
return pc, label, color
def load_point_clouds_of_all_objects(self):
"""Load point clouds for all objects."""
# Load segments
segments_file = osp.join(
self.top_scan_dir,
self.scan_id, self.scan_id + '_vh_clean_2.0.010000.segs.json'
)
with open(segments_file) as fid:
# segment_indices: list of len(self.pc) integers
segment_indices = json.load(fid)['segIndices']
segments = defaultdict(list) # store the indices of each segment
for i, s in enumerate(segment_indices):
segments[s].append(i)
# Aggregation file
aggregation_file = osp.join(
self.top_scan_dir,
self.scan_id, self.scan_id + '.aggregation.json')
with open(aggregation_file) as fid:
scan_aggregation = json.load(fid)
# Iterate over objects
self.three_d_objects = []
for object_info in scan_aggregation['segGroups']:
points = []
for s in object_info['segments']:
points.extend(segments[s])
points = np.array(list(set(points)))
if self.choices is not None:
points = self.new_pts[points[np.isin(points, self.choices)]]
self.three_d_objects.append(dict({
'object_id': int(object_info['objectId']),
'points': np.array(points),
'instance_label': str(object_info['label'])
}))
# Filter duplicate boxes
obj_list = []
for o in range(len(self.three_d_objects)):
if o == 0:
obj_list.append(self.three_d_objects[o])
continue
is_dupl = any(
len(obj['points']) == len(self.three_d_objects[o]['points'])
and (obj['points'] == self.three_d_objects[o]['points']).all()
for obj in self.three_d_objects[:o]
)
if not is_dupl:
obj_list.append(self.three_d_objects[o])
self.three_d_objects = obj_list
def instance_occurrences(self):
"""Retrun {instance_type: number of occurrences in the scan."""
res = defaultdict(int)
for o in self.three_d_objects:
res[o.instance_label] += 1
return res
def align_to_axes(self, point_cloud):
"""Align the scan to xyz axes using its alignment matrix."""
alignment_mat = self.mappings.get_axis_alignment_matrix(self.scan_id)
# Transform the points (homogeneous coordinates)
pts = np.ones((point_cloud.shape[0], 4), dtype=point_cloud.dtype)
pts[:, :3] = point_cloud
return np.dot(pts, alignment_mat.transpose())[:, :3]
def get_object_pc(self, object_id):
"""Get an object's point cloud."""
return self.pc[self.three_d_objects[object_id]['points']]
def get_object_color(self, object_id):
"""Get an object's color point cloud."""
return self.color[self.three_d_objects[object_id]['points']]
def get_object_normalized_pc(self, object_id):
"""Get an object's normalized point cloud."""
return self._normalize_pc(
self.pc[self.three_d_objects[object_id]['points']]
)
def get_object_binarized_pc(self, object_id):
"""Get an object's binarized point cloud."""
return self._binarize_pc(
len(self.pc), self.three_d_objects[object_id]['points']
)
def get_object_instance_label(self, object_id):
"""Get an object's instance label (fine-grained)."""
return self.three_d_objects[object_id]['instance_label']
def get_object_semantic_label(self, object_id):
"""Get an object's semantic label (coarse-grained)."""
one_point = self.three_d_objects[object_id]['points'][0]
idx = self.semantic_label_idx[one_point]
return self.mappings.idx_to_semantic_cls(idx)
def get_object_bbox(self, object_id):
"""Get an object's bounding box."""
return self._set_axis_align_bbox(self.get_object_pc(object_id))
@staticmethod
def _binarize_pc(num_points, inds):
"""Create a binary point cloud of object occupancy."""
bin_pc = np.zeros(num_points)
bin_pc[inds] = 1
return bin_pc
@staticmethod
def _normalize_pc(pc):
"""Normalize the object's point cloud to a unit sphere."""
# Center along mean
point_set = pc - np.expand_dims(np.mean(pc, axis=0), 0)
# Find 'radius'
dist = np.max(np.sqrt(np.sum(point_set ** 2, axis=1)), 0)
return point_set / dist # scale
@staticmethod
def _set_axis_align_bbox(pc):
"""Compute object bounding box."""
pc = pc[:, :3]
max_ = np.max(pc, axis=0)
min_ = np.min(pc, axis=0)
cx, cy, cz = (max_ + min_) / 2.0
lx, ly, lz = max_ - min_
xmin = cx - lx / 2.0
xmax = cx + lx / 2.0
ymin = cy - ly / 2.0
ymax = cy + ly / 2.0
zmin = cz - lz / 2.0
zmax = cz + lz / 2.0
return np.array([xmin, ymin, zmin, xmax, ymax, zmax])
@staticmethod
def _box_cxcyczwhd_to_xyzxyz(x):
x_c, y_c, z_c, w, h, d = x
assert w > 0
assert h > 0
assert d > 0
b = [
x_c - 0.5 * w, y_c - 0.5 * h, z_c - 0.5 * d,
x_c + 0.5 * w, y_c + 0.5 * h, z_c + 0.5 * d
]
return b
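# Minimal worked examples for the two box helpers above (values are hand-picked
# illustrations, not taken from any scan):
#   Scan._box_cxcyczwhd_to_xyzxyz([0., 0., 0., 2., 2., 2.])
#       -> [-1., -1., -1., 1., 1., 1.]
#   Scan._set_axis_align_bbox(np.array([[0., 0., 0.], [2., 4., 6.]]))
#       -> array([0., 0., 0., 2., 4., 6.])   # min corner followed by max corner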
| 36.184502
| 78
| 0.58403
|
73942797f417b64400a8ac78ad91e23a19aacf1b
| 2,085
|
py
|
Python
|
dbt_artifacts_loader/dbt/v1/catalog.py
|
yu-iskw/dbt-artifacts-loader
|
cbb3580ff5ae7c2a6f13b414d7ed90f1161d255b
|
[
"Apache-2.0"
] | 10
|
2021-07-07T01:10:02.000Z
|
2022-01-13T10:53:11.000Z
|
dbt_artifacts_loader/dbt/v1/catalog.py
|
yu-iskw/dbt-artifacts-loader
|
cbb3580ff5ae7c2a6f13b414d7ed90f1161d255b
|
[
"Apache-2.0"
] | null | null | null |
dbt_artifacts_loader/dbt/v1/catalog.py
|
yu-iskw/dbt-artifacts-loader
|
cbb3580ff5ae7c2a6f13b414d7ed90f1161d255b
|
[
"Apache-2.0"
] | null | null | null |
# generated by datamodel-codegen:
# filename: catalog.json
# timestamp: 2021-10-09T01:08:42+00:00
from __future__ import annotations
from datetime import datetime
from typing import Dict, List, Optional, Union
from dbt_artifacts_loader.dbt.base_bigquery_model import BaseBigQueryModel
from pydantic import Extra, Field
class CatalogMetadata(BaseBigQueryModel):
class Config:
extra = Extra.forbid
dbt_schema_version: Optional[str] = 'https://schemas.getdbt.com/dbt/catalog/v1.json'
dbt_version: Optional[str] = '0.19.0'
generated_at: Optional[datetime] = '2021-02-10T04:42:33.680487Z'
invocation_id: Optional[Optional[str]] = None
env: Optional[Dict[str, str]] = {}
class TableMetadata(BaseBigQueryModel):
class Config:
extra = Extra.forbid
type: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
name: str
comment: Optional[Optional[str]] = None
owner: Optional[Optional[str]] = None
class ColumnMetadata(BaseBigQueryModel):
class Config:
extra = Extra.forbid
type: str
comment: Optional[Optional[str]] = None
index: int
name: str
class StatsItem(BaseBigQueryModel):
class Config:
extra = Extra.forbid
id: str
label: str
value: Optional[Optional[Union[bool, str, float]]] = None
description: Optional[Optional[str]] = None
include: bool
class CatalogTable(BaseBigQueryModel):
class Config:
extra = Extra.forbid
metadata: TableMetadata
columns: Dict[str, ColumnMetadata]
stats: Dict[str, StatsItem]
unique_id: Optional[Optional[str]] = None
class CatalogV1(BaseBigQueryModel):
class Config:
extra = Extra.forbid
# The loaded_at field was manually added.
    loaded_at: datetime = Field(default_factory=datetime.utcnow,
                                description="The loaded time by dbt-artifacts-loader")
metadata: CatalogMetadata
nodes: Dict[str, CatalogTable]
sources: Dict[str, CatalogTable]
errors: Optional[Optional[List[str]]] = None
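# A minimal usage sketch (not part of the generated module): the path below is an
# assumption based on dbt's default target directory, and the artifact must match
# the v1 catalog schema for the strict `Extra.forbid` models above to accept it.
if __name__ == "__main__":
    import json

    with open("target/catalog.json") as fp:  # assumed artifact location
        catalog = CatalogV1(**json.load(fp))
    print(f"catalog has {len(catalog.nodes)} nodes and {len(catalog.sources)} sources")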
| 26.392405
| 88
| 0.690168
|
04d3f55502ec4eab7d5d5e17223e044313b1da03
| 10,544
|
py
|
Python
|
homework-3/hw3p1/autograder/hw3_autograder/test_ctc_toy.py
|
neelpawarcmu/deep-learning-library
|
401483fce40e3a025054596cbec368ff4f647661
|
[
"MIT"
] | null | null | null |
homework-3/hw3p1/autograder/hw3_autograder/test_ctc_toy.py
|
neelpawarcmu/deep-learning-library
|
401483fce40e3a025054596cbec368ff4f647661
|
[
"MIT"
] | null | null | null |
homework-3/hw3p1/autograder/hw3_autograder/test_ctc_toy.py
|
neelpawarcmu/deep-learning-library
|
401483fce40e3a025054596cbec368ff4f647661
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys, os, pdb
import pickle
from test import Test
sys.path.append("mytorch")
from ctc_loss import *
from ctc import *
data_path = os.path.join("autograder", "hw3_autograder", "data")
ref_data_path = os.path.join("autograder", "hw3_autograder", "data", "ctc_ref_data")
#################################################################################################
################################ Section 4 - CTC Loss ######################################
#################################################################################################
class CTCToyTest(Test):
def __init__(self):
pass
def test_ctc_extend_seq(self):
# Get curr data
probs = np.load(os.path.join(data_path, "X.npy"))
targets = np.load(os.path.join(data_path, "Y.npy"))
input_lens = np.load(os.path.join(data_path, "X_lens.npy"))
out_lens = np.load(os.path.join(data_path, "Y_lens.npy"))
CTC_user = CTC(BLANK=0)
f_ref_S_ext = open(os.path.join(ref_data_path, "ref_S_ext.pkl"), "rb")
f_ref_Skip_Connect = open(
os.path.join(ref_data_path, "ref_Skip_Connect.pkl"), "rb"
)
ref_S_ext_ls = pickle.load(f_ref_S_ext)
ref_Skip_Connect_ls = pickle.load(f_ref_Skip_Connect)
b = 0
target = targets[b, : out_lens[b]]
user_S_ext, user_Skip_Connect = CTC_user.targetWithBlank(target)
user_S_ext, user_Skip_Connect = (
np.array(user_S_ext),
np.array(user_Skip_Connect),
)
ref_S_ext = ref_S_ext_ls[b]
ref_Skip_Connect = ref_Skip_Connect_ls[b]
if not self.assertions(user_S_ext, ref_S_ext, "type", "extSymbols"):
return False
if not self.assertions(user_S_ext, ref_S_ext, "shape", "extSymbols"):
return False
if not self.assertions(user_S_ext, ref_S_ext, "closeness", "extSymbols"):
return False
if not self.assertions(
user_Skip_Connect, ref_Skip_Connect, "type", "Skip_Connect"
):
return False
if not self.assertions(
user_Skip_Connect, ref_Skip_Connect, "shape", "Skip_Connect"
):
return False
if not self.assertions(
user_Skip_Connect, ref_Skip_Connect, "closeness", "Skip_Connect"
):
return False
f_ref_S_ext.close()
f_ref_Skip_Connect.close()
return True
def test_ctc_forward_prob(self):
# Get curr data
probs = np.load(os.path.join(data_path, "X.npy"))
targets = np.load(os.path.join(data_path, "Y.npy"))
input_lens = np.load(os.path.join(data_path, "X_lens.npy"))
out_lens = np.load(os.path.join(data_path, "Y_lens.npy"))
CTC_user = CTC(BLANK=0)
f_ref_alpha = open(os.path.join(ref_data_path, "ref_alpha.pkl"), "rb")
ref_alpha_ls = pickle.load(f_ref_alpha)
b = 0
logit = probs[: input_lens[b], b]
target = targets[b, : out_lens[b]]
user_S_ext, user_Skip_Connect = CTC_user.targetWithBlank(target)
user_alpha = CTC_user.forwardProb(logit, user_S_ext, user_Skip_Connect)
ref_alpha = ref_alpha_ls[b]
if not self.assertions(user_alpha, ref_alpha, "type", "alpha"):
return False
if not self.assertions(user_alpha, ref_alpha, "shape", "alpha"):
return False
if not self.assertions(user_alpha, ref_alpha, "closeness", "alpha"):
return False
f_ref_alpha.close()
return True
def test_ctc_backward_prob(self):
# Get curr data
probs = np.load(os.path.join(data_path, "X.npy"))
targets = np.load(os.path.join(data_path, "Y.npy"))
input_lens = np.load(os.path.join(data_path, "X_lens.npy"))
out_lens = np.load(os.path.join(data_path, "Y_lens.npy"))
CTC_user = CTC(BLANK=0)
f_ref_alpha = open(os.path.join(ref_data_path, "ref_alpha.pkl"), "rb")
f_ref_beta = open(os.path.join(ref_data_path, "ref_beta.pkl"), "rb")
f_ref_gamma = open(os.path.join(ref_data_path, "ref_gamma.pkl"), "rb")
ref_alpha_ls = pickle.load(f_ref_alpha)
ref_beta_ls = pickle.load(f_ref_beta)
ref_gamma_ls = pickle.load(f_ref_gamma)
b = 0
logit = probs[: input_lens[b], b]
target = targets[b, : out_lens[b]]
user_S_ext, user_Skip_Connect = CTC_user.targetWithBlank(target)
user_beta = CTC_user.backwardProb(logit, user_S_ext, user_Skip_Connect)
ref_beta = ref_beta_ls[b]
if not self.assertions(user_beta, ref_beta, "type", "beta"):
return False
if not self.assertions(user_beta, ref_beta, "shape", "beta"):
return False
if not self.assertions(user_beta, ref_beta, "closeness", "beta"):
return False
f_ref_beta.close()
return True
def test_ctc_posterior_prob(self):
# Get curr data
probs = np.load(os.path.join(data_path, "X.npy"))
targets = np.load(os.path.join(data_path, "Y.npy"))
input_lens = np.load(os.path.join(data_path, "X_lens.npy"))
out_lens = np.load(os.path.join(data_path, "Y_lens.npy"))
CTC_user = CTC(BLANK=0)
f_ref_alpha = open(os.path.join(ref_data_path, "ref_alpha.pkl"), "rb")
f_ref_beta = open(os.path.join(ref_data_path, "ref_beta.pkl"), "rb")
f_ref_gamma = open(os.path.join(ref_data_path, "ref_gamma.pkl"), "rb")
ref_alpha_ls = pickle.load(f_ref_alpha)
ref_beta_ls = pickle.load(f_ref_beta)
ref_gamma_ls = pickle.load(f_ref_gamma)
b = 0
logit = probs[: input_lens[b], b]
target = targets[b, : out_lens[b]]
user_S_ext, user_Skip_Connect = CTC_user.targetWithBlank(target)
user_alpha = CTC_user.forwardProb(logit, user_S_ext, user_Skip_Connect)
user_beta = CTC_user.backwardProb(logit, user_S_ext, user_Skip_Connect)
user_gamma = CTC_user.postProb(user_alpha, user_beta)
ref_alpha = ref_alpha_ls[b]
ref_beta = ref_beta_ls[b]
ref_gamma = ref_gamma_ls[b]
if not self.assertions(user_alpha, ref_alpha, "type", "alpha"):
return False
if not self.assertions(user_alpha, ref_alpha, "shape", "alpha"):
return False
if not self.assertions(user_alpha, ref_alpha, "closeness", "alpha"):
return False
if not self.assertions(user_beta, ref_beta, "type", "beta"):
return False
if not self.assertions(user_beta, ref_beta, "shape", "beta"):
return False
if not self.assertions(user_beta, ref_beta, "closeness", "beta"):
return False
if not self.assertions(user_gamma, ref_gamma, "type", "gamma"):
return False
if not self.assertions(user_gamma, ref_gamma, "shape", "gamma"):
return False
if not self.assertions(user_gamma, ref_gamma, "closeness", "gamma"):
return False
f_ref_alpha.close()
f_ref_beta.close()
f_ref_gamma.close()
return True
def test_ctc_forward(self):
# Get curr data
probs = np.load(os.path.join(data_path, "X.npy"))
targets = np.load(os.path.join(data_path, "Y.npy"))
input_lens = np.load(os.path.join(data_path, "X_lens.npy"))
out_lens = np.load(os.path.join(data_path, "Y_lens.npy"))
CTC_user = CTCLoss(BLANK=0)
user_loss = CTC_user(probs, targets, input_lens, out_lens)
ref_loss = np.load(os.path.join(ref_data_path, "ref_loss.npy"))
if not self.assertions(user_loss, ref_loss, "closeness", "forward"):
return False
return True
def test_ctc_backward(self):
# Get curr data
probs = np.load(os.path.join(data_path, "X.npy"))
targets = np.load(os.path.join(data_path, "Y.npy"))
input_lens = np.load(os.path.join(data_path, "X_lens.npy"))
out_lens = np.load(os.path.join(data_path, "Y_lens.npy"))
CTC_user = CTCLoss(BLANK=0)
user_loss = CTC_user(probs, targets, input_lens, out_lens)
user_dy = CTC_user.backward()
ref_dy = np.load(os.path.join(ref_data_path, "ref_dy.npy"))
if not self.assertions(user_dy, ref_dy, "type", "backward"):
return False
if not self.assertions(user_dy, ref_dy, "closeness", "backward"):
return False
return True
def run_test(self):
# Test Extend Sequence with Blank
self.print_name("Section 4 - Extend Sequence with Blank")
extend_outcome = self.test_ctc_extend_seq()
self.print_outcome("Extend Sequence with Blank", extend_outcome)
if extend_outcome == False:
self.print_failure("Extend Sequence with Blank")
return False
# Test forward Probability
self.print_name("Section 4 - Forward Probability")
posterior_outcome = self.test_ctc_forward_prob()
self.print_outcome("Forward Probability", posterior_outcome)
if posterior_outcome == False:
self.print_failure("Posterior Probability")
return False
# Test backward Probability
self.print_name("Section 4 - Backward Probability")
posterior_outcome = self.test_ctc_backward_prob()
self.print_outcome("Backward Probability", posterior_outcome)
if posterior_outcome == False:
self.print_failure("Posterior Probability")
return False
# Test Posterior Probability
self.print_name("Section 4 - Posterior Probability")
posterior_outcome = self.test_ctc_posterior_prob()
self.print_outcome("Posterior Probability", posterior_outcome)
if posterior_outcome == False:
self.print_failure("Posterior Probability")
return False
# Test forward
self.print_name("Section 4.1 - CTC Forward")
forward_outcome = self.test_ctc_forward()
self.print_outcome("CTC Forward", forward_outcome)
if forward_outcome == False:
self.print_failure("CTC Forward")
return False
# Test Backward
self.print_name("Section 4.2 - CTC Backward")
backward_outcome = self.test_ctc_backward()
self.print_outcome("CTC backward", backward_outcome)
if backward_outcome == False:
self.print_failure("CTC Backward")
return False
return True
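# Usage note: this harness is normally driven by the course autograder, which is
# expected to instantiate the class and call run_test(), roughly:
#   CTCToyTest().run_test()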
| 36.233677
| 97
| 0.611817
|
9c4b18056d60c4dcd3417110db04daea07d2ea69
| 3,483
|
py
|
Python
|
.history/DEBER_20210831114112.py
|
Alopezm5/PROYECTO-PARTE-1
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
[
"MIT"
] | null | null | null |
.history/DEBER_20210831114112.py
|
Alopezm5/PROYECTO-PARTE-1
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
[
"MIT"
] | null | null | null |
.history/DEBER_20210831114112.py
|
Alopezm5/PROYECTO-PARTE-1
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
[
"MIT"
] | null | null | null |
class Nomina:
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr="",email="",estado="",profe="",dep=""):#3
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
self.correo=email
self.estadocivil=estado
self.profesion=profe
self.departamento=dep
class Empresa(Nomina):
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("Datos de la Empresa")
print("La empresa "{}"\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de "{}"\n Es una entidad "{}"".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Nomina):
    def empleado(self):
        self.nombre=input("Ingresar nombre del empleado: ")
        self.cedula=int(input("Ingresar numero de cedula: "))
        self.direccion=input("Ingresar la direccion del empleado: ")
        self.telefono=int(input("Ingresar numero de contacto del empleado: "))
        self.correo=input("Ingresar correo personal del empleado: ")
    def empleadoObrero(self):
        self.estadocivil=input("Ingresar estado civil del empleado: ")
    def empleadoOficina(self):  # still missing two attributes, e.g. the office definition
        self.profesion=input("Ingresar profesion del empleado: ")
    def mostrarempleado(self):
        if eleccion==1:
            print(self.estadocivil)
        elif eleccion==2:
            print(self.profesion)
class Departamento(Empleado):
    def departa(self):
        self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
    def mostrarDeparta(self):
        print("El empleado pertenece al departamento de: \"{}\"".format(self.departamento))
# class Pagos():
# def __init__(self):
# pass
# def pagoNormal(self, valhora,hoesti,hotraba, desc, desper):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# def pagoExtra(self, valhora,hoesti,hotraba,incentivos):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.bono=incentivos
# def Nomina(self, nom, valhora,hoesti,hotraba, desc, desper,incentivos):#faltan 8 atributos incluir cosas del empleado y sobretiempo
# self.nombre= nom
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# self.bono=incentivos
nom=Nomina()
emp=Empresa()
emp.datosEmpresa()
emp.mostrarEmpresa()
# emple=Empleado()
# emple.empleado()
# eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
# if eleccion==1:
# emple.empleadoObrero()
# elif eleccion==2:
# emple.empleadoOficina()
# else:
# print("No selecciono el tipo de empleado")
# emple.mostrarempleado()
| 35.907216
| 247
| 0.656044
|
875bfac76a5f87ef9cd19200d764c9ee6ae69308
| 2,999
|
py
|
Python
|
tests/safety/common.py
|
loveks520/panda
|
0c2c1494908916ce776f1eb047947329e9887049
|
[
"MIT"
] | 10
|
2020-02-21T21:21:36.000Z
|
2022-01-30T12:48:15.000Z
|
tests/safety/common.py
|
loveks520/panda
|
0c2c1494908916ce776f1eb047947329e9887049
|
[
"MIT"
] | 1
|
2021-03-20T20:37:40.000Z
|
2021-03-20T20:37:40.000Z
|
tests/safety/common.py
|
loveks520/panda
|
0c2c1494908916ce776f1eb047947329e9887049
|
[
"MIT"
] | 10
|
2020-02-15T07:34:36.000Z
|
2022-03-21T05:49:47.000Z
|
from panda.tests.safety import libpandasafety_py
MAX_WRONG_COUNTERS = 5
class UNSAFE_MODE:
DEFAULT = 0
DISABLE_DISENGAGE_ON_GAS = 1
DISABLE_STOCK_AEB = 2
ENABLE_WEAK_STEERING_WHILE_NOT_ENGAGED = 4
RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX = 8
def make_msg(bus, addr, length=8):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
if addr >= 0x800:
to_send[0].RIR = (addr << 3) | 5
else:
to_send[0].RIR = (addr << 21) | 1
to_send[0].RDTR = length
to_send[0].RDTR |= bus << 4
return to_send
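# Worked examples of the register packing above (addresses are arbitrary illustrations):
#   make_msg(0, 0x123)[0].RIR == (0x123 << 21) | 1   # 11-bit address in bits 31..21
#   make_msg(1, 0x800)[0].RIR == (0x800 << 3) | 5    # extended address in bits 31..3
# RDTR carries the payload length in its low bits, with the bus index shifted left by 4.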
class StdTest:
@staticmethod
def test_relay_malfunction(test, addr, bus=0):
# input is a test class and the address that, if seen on specified bus, triggers
# the relay_malfunction protection logic: both tx_hook and fwd_hook are
# expected to return failure
test.assertFalse(test.safety.get_relay_malfunction())
test.safety.safety_rx_hook(make_msg(bus, addr, 8))
test.assertTrue(test.safety.get_relay_malfunction())
for a in range(1, 0x800):
for b in range(0, 3):
test.assertFalse(test.safety.safety_tx_hook(make_msg(b, a, 8)))
test.assertEqual(-1, test.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
@staticmethod
def test_manually_enable_controls_allowed(test):
test.safety.set_controls_allowed(1)
test.assertTrue(test.safety.get_controls_allowed())
test.safety.set_controls_allowed(0)
test.assertFalse(test.safety.get_controls_allowed())
@staticmethod
def test_spam_can_buses(test, TX_MSGS):
for addr in range(1, 0x800):
for bus in range(0, 4):
if all(addr != m[0] or bus != m[1] for m in TX_MSGS):
test.assertFalse(test.safety.safety_tx_hook(make_msg(bus, addr, 8)))
@staticmethod
def test_allow_brake_at_zero_speed(test):
# Brake was already pressed
test.safety.safety_rx_hook(test._speed_msg(0))
test.safety.safety_rx_hook(test._brake_msg(1))
test.safety.set_controls_allowed(1)
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertTrue(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._brake_msg(0))
test.assertTrue(test.safety.get_controls_allowed())
# rising edge of brake should disengage
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertFalse(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._brake_msg(0)) # reset no brakes
@staticmethod
def test_not_allow_brake_when_moving(test, standstill_threshold):
# Brake was already pressed
test.safety.safety_rx_hook(test._brake_msg(1))
test.safety.set_controls_allowed(1)
test.safety.safety_rx_hook(test._speed_msg(standstill_threshold))
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertTrue(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._speed_msg(standstill_threshold + 1))
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertFalse(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._speed_msg(0))
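# Usage sketch (hypothetical subclass, not from this file): a per-platform safety test
# would inherit from unittest.TestCase, define helpers such as _speed_msg/_brake_msg and
# its TX_MSGS list, and then call e.g. StdTest.test_spam_can_buses(self, TX_MSGS) or
# StdTest.test_allow_brake_at_zero_speed(self) from its own test methods.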
| 38.448718
| 84
| 0.741247
|
c0e607109e5f850d00b6b257c5e585b15bac3e49
| 1,590
|
py
|
Python
|
pyrcm/terminal/rediscmd_noop.py
|
booleys1012/redisclustermon
|
af51685271c9d8f0aa48e5995be6c2c52cf7fc1b
|
[
"MIT"
] | null | null | null |
pyrcm/terminal/rediscmd_noop.py
|
booleys1012/redisclustermon
|
af51685271c9d8f0aa48e5995be6c2c52cf7fc1b
|
[
"MIT"
] | null | null | null |
pyrcm/terminal/rediscmd_noop.py
|
booleys1012/redisclustermon
|
af51685271c9d8f0aa48e5995be6c2c52cf7fc1b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 Justin Bewley Lo (justinbewley.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from pyrcm.terminal.rediscmd import RedisClusterCmd
class RedisClusterCmd_Noop(RedisClusterCmd):
CMDNAME = 'NOOP'
CMDDETAILS = {
'read_or_write': None,
'description': None,
'example': None,
}
def __init__(self, rc_client, args):
super(RedisClusterCmd_Noop, self).__init__(rc_client, args)
def get_args_error(self):
return 'invalid command: {}'.format(self.args[0])
| 36.976744
| 80
| 0.737736
|
83ee36a8ca54eb596bd30adec8a7adb61705d638
| 883
|
py
|
Python
|
wintria/wintriauser/views.py
|
codelucas/wintria.com
|
99c3f20d64e6ecf3d02cf0117233de349274a607
|
[
"MIT"
] | 2
|
2017-10-04T20:53:09.000Z
|
2021-11-12T10:02:32.000Z
|
wintria/wintriauser/views.py
|
codelucas/wintria.com
|
99c3f20d64e6ecf3d02cf0117233de349274a607
|
[
"MIT"
] | null | null | null |
wintria/wintriauser/views.py
|
codelucas/wintria.com
|
99c3f20d64e6ecf3d02cf0117233de349274a607
|
[
"MIT"
] | null | null | null |
"""
"""
from django.http import HttpResponseRedirect
from wintria.wintriauser.forms import FeedbackForm
from wintria.wintria.views import render_with_context
from wintria.wintria.settings import get_root_url
def send_feedback(request):
form_args = {}
if request.POST:
form_args['data'] = request.POST
feedback_form = FeedbackForm(**form_args)
if feedback_form.is_valid():
feedback = feedback_form.save(commit=True)
return HttpResponseRedirect(get_root_url() +
'/thanks_for_all_the_fish/')
else:
feedback_form = FeedbackForm(**form_args)
return render_with_context(request, 'feedback.html',
kwargs={ 'feedback_form': feedback_form })
def thanks_for_all_the_fish(request):
return render_with_context(request, 'thanks.html', kwargs={})
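# Hypothetical URL wiring sketch (pattern names are assumptions, not taken from this repo;
# only the '/thanks_for_all_the_fish/' path is implied by the redirect above):
# urlpatterns = [
#     url(r'^feedback/$', send_feedback),
#     url(r'^thanks_for_all_the_fish/$', thanks_for_all_the_fish),
# ]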
| 35.32
| 73
| 0.672707
|
026e70d20e3df1e676c0c929a3646b6122c3ba06
| 1,467
|
py
|
Python
|
setup.py
|
patowc/django-encrypted-field
|
ffe02eab39a85dc988d1330e673ff7b22c4056b1
|
[
"MIT"
] | null | null | null |
setup.py
|
patowc/django-encrypted-field
|
ffe02eab39a85dc988d1330e673ff7b22c4056b1
|
[
"MIT"
] | null | null | null |
setup.py
|
patowc/django-encrypted-field
|
ffe02eab39a85dc988d1330e673ff7b22c4056b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
long_description = (
    open('README.md', encoding='utf-8').read()
)
version = '1.0.4'
setup(
name='django-encrypted-field',
description=(
'This is a Django Model Field class that can be '
'encrypted using ChaCha20 poly 1305, and other algorithms.'
),
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/patowc/django-encrypted-field',
license='MIT',
author='Román Ramírez',
author_email='rramirez@rootedcon.com',
packages=find_packages(),
version=version,
install_requires=[
'Django>=4.0',
'pycryptodomex>=3.12.0'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Django',
],
zip_safe=False,
)
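# Hypothetical install/build sketch (standard setuptools workflow, nothing specific to
# this package is assumed):
#   pip install .              # install from a local checkout
#   python setup.py sdist      # build a source distribution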
| 29.34
| 70
| 0.615542
|
a965ae51949ee01d477a3cfd9cab0f24f5b4edf6
| 25,777
|
py
|
Python
|
tensorflow_probability/python/sts/structural_time_series_test.py
|
jvishnuvardhan/probability
|
a408f8fbde831a40df3fb10023deaf997d104b24
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/sts/structural_time_series_test.py
|
jvishnuvardhan/probability
|
a408f8fbde831a40df3fb10023deaf997d104b24
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/sts/structural_time_series_test.py
|
jvishnuvardhan/probability
|
a408f8fbde831a40df3fb10023deaf997d104b24
|
[
"Apache-2.0"
] | 1
|
2021-06-09T00:10:32.000Z
|
2021-06-09T00:10:32.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.sts.structural_time_series."""
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import Autoregressive
from tensorflow_probability.python.sts import AutoregressiveIntegratedMovingAverage
from tensorflow_probability.python.sts import DynamicLinearRegression
from tensorflow_probability.python.sts import LinearRegression
from tensorflow_probability.python.sts import LocalLevel
from tensorflow_probability.python.sts import LocalLinearTrend
from tensorflow_probability.python.sts import Seasonal
from tensorflow_probability.python.sts import SemiLocalLinearTrend
from tensorflow_probability.python.sts import SmoothSeasonal
from tensorflow_probability.python.sts import SparseLinearRegression
from tensorflow_probability.python.sts import Sum
from tensorflow_probability.python.sts.internal import util as sts_util
class _StructuralTimeSeriesTests(object):
def test_broadcast_batch_shapes(self):
seed = test_util.test_seed(sampler_type='stateless')
batch_shape = [3, 1, 4]
partial_batch_shape = [2, 1]
expected_broadcast_batch_shape = [3, 2, 4]
# Build a model where parameters have different batch shapes.
partial_batch_loc = self._build_placeholder(
np.random.randn(*partial_batch_shape))
full_batch_loc = self._build_placeholder(
np.random.randn(*batch_shape))
partial_scale_prior = tfd.LogNormal(
loc=partial_batch_loc, scale=tf.ones_like(partial_batch_loc))
full_scale_prior = tfd.LogNormal(
loc=full_batch_loc, scale=tf.ones_like(full_batch_loc))
loc_prior = tfd.Normal(loc=partial_batch_loc,
scale=tf.ones_like(partial_batch_loc))
linear_trend = LocalLinearTrend(level_scale_prior=full_scale_prior,
slope_scale_prior=full_scale_prior,
initial_level_prior=loc_prior,
initial_slope_prior=loc_prior)
seasonal = Seasonal(num_seasons=3,
drift_scale_prior=partial_scale_prior,
initial_effect_prior=loc_prior)
model = Sum([linear_trend, seasonal],
observation_noise_scale_prior=partial_scale_prior)
param_samples = [p.prior.sample(seed=seed) for p in model.parameters]
ssm = model.make_state_space_model(num_timesteps=2,
param_vals=param_samples)
# Test that the model's batch shape matches the SSM's batch shape,
# and that they both match the expected broadcast shape.
self.assertAllEqual(model.batch_shape, ssm.batch_shape)
(model_batch_shape_tensor_,
ssm_batch_shape_tensor_) = self.evaluate((model.batch_shape_tensor(),
ssm.batch_shape_tensor()))
self.assertAllEqual(model_batch_shape_tensor_, ssm_batch_shape_tensor_)
self.assertAllEqual(model_batch_shape_tensor_,
expected_broadcast_batch_shape)
def test_addition_raises_error_with_no_observed_time_series(self):
c1 = tfp.sts.LocalLevel(level_scale_prior=tfd.Normal(0., 1.),
initial_level_prior=tfd.Normal(0., 1.))
c2 = tfp.sts.LocalLevel(level_scale_prior=tfd.Normal(0., 0.1),
initial_level_prior=tfd.Normal(1., 2.))
with self.assertRaisesRegex(
ValueError, 'Could not automatically create a `Sum` component'):
c1 + c2 # pylint: disable=pointless-statement
def test_adding_two_sums(self):
observed_time_series = self._build_placeholder([1., 2., 3., 4., 5.])
s1 = tfp.sts.Sum(
[tfp.sts.LocalLevel(observed_time_series=observed_time_series)],
observed_time_series=observed_time_series)
s2 = tfp.sts.Sum(
[tfp.sts.LocalLinearTrend(observed_time_series=observed_time_series)],
observed_time_series=observed_time_series)
s3 = s1 + s2
self.assertLen(s3.components, 2)
seed = test_util.test_seed(sampler_type='stateless')
def observation_noise_scale_prior_sample(s):
return s.parameters[0].prior.sample(seed=seed)
self.assertAllEqual(observation_noise_scale_prior_sample(s3),
observation_noise_scale_prior_sample(s1))
self.assertAllEqual(observation_noise_scale_prior_sample(s3),
observation_noise_scale_prior_sample(s2))
self.assertAllEqual(s3.constant_offset, s1.constant_offset)
self.assertAllEqual(s3.constant_offset, s2.constant_offset)
with self.assertRaisesRegex(
ValueError, 'Cannot add Sum components'):
s1.copy(observed_time_series=3 * observed_time_series) + s2 # pylint: disable=expression-not-assigned
with self.assertRaisesRegex(
ValueError, 'Cannot add Sum components'):
s1.copy(constant_offset=4.) + s2 # pylint: disable=expression-not-assigned
with self.assertRaisesRegex(
ValueError, 'Cannot add Sum components'):
s1.copy(observation_noise_scale_prior=tfd.Normal( # pylint: disable=expression-not-assigned
self._build_placeholder(0.), self._build_placeholder(1.))) + s2
def _build_placeholder(self, ndarray, dtype=None):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
dtype: optional `dtype`; if not specified, defaults to `self.dtype`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
if dtype is None:
dtype = self.dtype
ndarray = np.asarray(ndarray).astype(dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class StructuralTimeSeriesTestsStaticShape32(
_StructuralTimeSeriesTests, test_util.TestCase):
dtype = np.float32
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class StructuralTimeSeriesTestsDynamicShape32(
_StructuralTimeSeriesTests, test_util.TestCase):
dtype = np.float32
use_static_shape = False
@test_util.test_all_tf_execution_regimes
class StructuralTimeSeriesTestsStaticShape64(
_StructuralTimeSeriesTests, test_util.TestCase):
dtype = np.float64
use_static_shape = True
class _StsTestHarness(object):
def test_state_space_model(self):
seed = test_util.test_seed(sampler_type='stateless')
model = self._build_sts()
dummy_param_vals = [p.prior.sample(seed=seed) for p in model.parameters]
initial_state_prior = tfd.MultivariateNormalDiag(
loc=-2. + tf.zeros([model.latent_size]),
scale_diag=3. * tf.ones([model.latent_size]))
mask = tf.convert_to_tensor(
[False, True, True, False, False, False, False, True, False, False],
dtype=tf.bool)
# Verify we build the LGSSM without errors.
ssm = model.make_state_space_model(
num_timesteps=10,
param_vals=dummy_param_vals,
initial_state_prior=initial_state_prior,
initial_step=1,
mask=mask)
# Verify that the child class passes the initial step, prior, and mask
# arguments through to the SSM.
self.assertEqual(self.evaluate(ssm.initial_step), 1)
self.assertEqual(ssm.initial_state_prior, initial_state_prior)
self.assertAllEqual(ssm.mask, mask)
# Verify the model has the correct latent size.
self.assertEqual(
self.evaluate(
tf.convert_to_tensor(
ssm.latent_size_tensor())),
model.latent_size)
# Verify that the SSM tracks its parameters.
seed = test_util.test_seed(sampler_type='stateless')
observed_time_series = self.evaluate(
samplers.normal([10, 1], seed=seed))
ssm_copy = ssm.copy(name='copied_ssm')
self.assertAllClose(*self.evaluate((
ssm.log_prob(observed_time_series),
ssm_copy.log_prob(observed_time_series))))
def test_log_joint(self):
seed = test_util.test_seed(sampler_type='stateless')
model = self._build_sts()
num_timesteps = 5
# simple case: single observation, and all params unbatched
log_joint_fn = model.joint_log_prob(
observed_time_series=np.float32(
np.random.standard_normal([num_timesteps, 1])))
lp_seed1, lp_seed2 = tfp.random.split_seed(seed, n=2)
seeds = tfp.random.split_seed(lp_seed1, n=len(model.parameters))
lp = self.evaluate(
log_joint_fn(*[p.prior.sample(seed=seed) for seed, p in zip(
seeds, model.parameters)]))
self.assertEqual(tf.TensorShape([]), lp.shape)
# more complex case: y has sample and batch shapes, some parameters
# have partial batch shape.
full_batch_shape = [2, 3]
partial_batch_shape = [3]
sample_shape = [4]
log_joint_fn = model.joint_log_prob(
observed_time_series=np.float32(
np.random.standard_normal(sample_shape + full_batch_shape +
[num_timesteps, 1])))
# We alternate full_batch_shape, partial_batch_shape in sequence so that in
# a model with only one parameter, that parameter is constructed with full
# batch shape.
seeds = tfp.random.split_seed(lp_seed2, n=len(model.parameters))
batch_shaped_parameters_ = self.evaluate([
p.prior.sample(sample_shape=full_batch_shape if (i % 2 == 0)
else partial_batch_shape, seed=seed)
for (i, (seed, p)) in enumerate(zip(seeds, model.parameters))])
lp = self.evaluate(log_joint_fn(*batch_shaped_parameters_))
self.assertEqual(tf.TensorShape(full_batch_shape), lp.shape)
# Check that the log joint function also supports parameters passed
# as kwargs.
parameters_by_name_ = {
p.name: v for (p, v) in zip(model.parameters, batch_shaped_parameters_)}
lp_with_kwargs = self.evaluate(log_joint_fn(**parameters_by_name_))
self.assertAllClose(lp, lp_with_kwargs)
def test_constant_series_does_not_induce_constant_prior(self):
observed_time_series = np.array([1.0, 1.0, 1.0]).astype(np.float32)
model = self._build_sts(observed_time_series=observed_time_series)
for parameter in model.parameters:
param_samples = self.evaluate(
parameter.prior.sample(
30, seed=test_util.test_seed(sampler_type='stateless')))
self.assertAllGreater(tf.math.reduce_std(param_samples), 0.)
def test_log_joint_with_missing_observations(self):
# Test that this component accepts MaskedTimeSeries inputs. In most
# cases, it is sufficient that the component accesses only
# `empirical_statistics(observed_time_series)`.
# TODO(b/139483802): De-flake this test when run with --vary_seed.
seed = test_util.test_seed(hardcoded_seed=123, sampler_type='stateless')
observed_time_series = np.array(
[1.0, 2.0, -1000., 0.4, np.nan, 1000., 4.2, np.inf]).astype(np.float32)
observation_mask = np.array(
[False, False, True, False, True, True, False, True]).astype(np.bool_)
masked_time_series = tfp.sts.MaskedTimeSeries(observed_time_series,
is_missing=observation_mask)
model = self._build_sts(observed_time_series=masked_time_series)
log_joint_fn = model.joint_log_prob(
observed_time_series=masked_time_series)
seeds = tfp.random.split_seed(seed, n=len(model.parameters))
lp = self.evaluate(
log_joint_fn(*[p.prior.sample(seed=seed) for seed, p in zip(
seeds, model.parameters)]))
self.assertEqual(tf.TensorShape([]), lp.shape)
self.assertTrue(np.isfinite(lp))
def test_prior_sample(self):
model = self._build_sts()
ys, param_samples = model.prior_sample(
num_timesteps=5, params_sample_shape=[2], trajectories_sample_shape=[3],
seed=test_util.test_seed(sampler_type='stateless'))
self.assertAllEqual(ys.shape, [3, 2, 5, 1])
self.assertEqual(len(param_samples), len(model.parameters))
for i in range(len(param_samples)):
sampled = param_samples[i]
param = model.parameters[i]
self.assertAllEqual(sampled.shape, [
2,
] + param.prior.batch_shape.as_list() + param.prior.event_shape.as_list())
def test_joint_distribution_log_prob(self):
model = self._build_sts(
# Dummy series to build the model with float64 priors. Working in
# float64 minimizes numeric inconsistencies between log-prob
# implementations.
observed_time_series=np.float64([1., 0., 1., 0.]))
jd_no_trajectory_shape = model.joint_distribution(num_timesteps=11)
self.assertLen(jd_no_trajectory_shape.dtype, len(model.parameters) + 1)
jd = model.joint_distribution(trajectories_shape=[2], num_timesteps=11)
self.assertLen(jd.dtype, len(model.parameters) + 1)
# Time series sampled from the JD should have the expected shape.
samples = self.evaluate(
jd.sample(seed=test_util.test_seed(sampler_type='stateless')))
observed_time_series = samples['observed_time_series']
self.assertAllEqual(tf.shape(observed_time_series),
tf.concat([model.batch_shape_tensor(), [2, 11, 1]],
axis=0))
# The JD's `log_prob` val should match the previous `joint_log_prob` method.
sampled_params = list(samples.values())[:-1]
lp0 = model.joint_log_prob(observed_time_series)(*sampled_params)
lp1 = jd.log_prob(samples)
self.assertAllClose(lp0, lp1)
# Passing `observed_time_series` should return the pinned distribution.
jd_pinned = model.joint_distribution(
observed_time_series=observed_time_series)
lp2 = jd_pinned.unnormalized_log_prob(*sampled_params)
self.assertAllClose(lp0, lp2)
# The JD should expose the STS bijectors as its default bijectors.
jd_bijectors = jd._model_unflatten(
jd.experimental_default_event_space_bijector().bijectors)
for param in model.parameters:
self.assertEqual(param.bijector, jd_bijectors[param.name])
def test_default_priors_follow_batch_shapes(self):
seed = test_util.test_seed(sampler_type='stateless')
num_timesteps = 3
time_series_sample_shape = [4, 2]
observation_shape_full = time_series_sample_shape + [num_timesteps]
dummy_observation = np.random.randn(
*(observation_shape_full)).astype(np.float32)
model = self._build_sts(observed_time_series=dummy_observation)
# The model should construct a default parameter prior for *each* observed
# time series, so the priors will have batch_shape equal to
# `time_series_sample_shape`.
for parameter in model.parameters:
self.assertEqual(parameter.prior.batch_shape, time_series_sample_shape)
# The initial state prior should also have the appropriate batch shape.
# To test this, we build the ssm and test that it has a consistent
# broadcast batch shape.
seeds = tfp.random.split_seed(seed, n=len(model.parameters))
param_samples = [p.prior.sample(seed=seed) for seed, p in zip(
seeds, model.parameters)]
ssm = model.make_state_space_model(
num_timesteps=num_timesteps, param_vals=param_samples)
self.assertEqual(ssm.batch_shape, time_series_sample_shape)
def test_copy(self):
model = self._build_sts()
copy = model.copy()
self.assertNotEqual(id(model), id(copy))
self.assertAllEqual([p.name for p in model.parameters],
[p.name for p in copy.parameters])
def test_add_component(self):
model = self._build_sts(
observed_time_series=np.array([1., 2., 3.], np.float32))
new_component = tfp.sts.LocalLevel(name='LocalLevel2')
sum_model = model + new_component
ledom_mus = new_component + model # `sum_model` backwards.
self.assertIsInstance(sum_model, tfp.sts.Sum)
self.assertIsInstance(ledom_mus, tfp.sts.Sum)
self.assertEqual(sum_model.components[-1], new_component)
self.assertEqual(ledom_mus.components[0], new_component)
self.assertEqual(set([p.name for p in sum_model.parameters]),
set([p.name for p in ledom_mus.parameters]))
# If we built a new Sum component (rather than extending an existing one),
# we should have passed an observed_time_series so that we get reasonable
# default priors.
if not isinstance(model, tfp.sts.Sum):
self.assertIsNotNone(sum_model.init_parameters['observed_time_series'])
self.assertIsNotNone(ledom_mus.init_parameters['observed_time_series'])
@test_util.test_all_tf_execution_regimes
class AutoregressiveTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return Autoregressive(order=3, observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class ARMATest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
one = 1.
if observed_time_series is not None:
observed_time_series = (
sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series))
one = tf.ones_like(observed_time_series.time_series[..., 0, 0])
return AutoregressiveIntegratedMovingAverage(
ar_order=3,
ma_order=1,
integration_degree=0,
level_drift_prior=tfd.Normal(loc=one, scale=one),
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class ARIMATest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return AutoregressiveIntegratedMovingAverage(
ar_order=1, ma_order=2, integration_degree=2,
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class LocalLevelTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return LocalLevel(observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class LocalLinearTrendTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return LocalLinearTrend(observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class SeasonalTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
# Note that a Seasonal model with `num_steps_per_season > 1` would have
# deterministic dependence between timesteps, so evaluating `log_prob` of an
# arbitrary time series leads to Cholesky decomposition errors unless the
# model also includes an observation noise component (which it would in
# practice, but this test harness attempts to test the component in
# isolation). The `num_steps_per_season=1` case tested here will not suffer
# from this issue.
return Seasonal(num_seasons=7,
num_steps_per_season=1,
observed_time_series=observed_time_series,
constrain_mean_effect_to_zero=False)
@test_util.test_all_tf_execution_regimes
class SeasonalWithZeroMeanConstraintTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return Seasonal(num_seasons=7,
num_steps_per_season=1,
observed_time_series=observed_time_series,
constrain_mean_effect_to_zero=True)
@test_util.test_all_tf_execution_regimes
class SeasonalWithMultipleStepsAndNoiseTest(test_util.TestCase,
_StsTestHarness):
def _build_sts(self, observed_time_series=None):
day_of_week = tfp.sts.Seasonal(num_seasons=7,
num_steps_per_season=24,
allow_drift=False,
observed_time_series=observed_time_series,
name='day_of_week')
return tfp.sts.Sum(components=[day_of_week],
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class SemiLocalLinearTrendTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return SemiLocalLinearTrend(observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class SmoothSeasonalTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
return SmoothSeasonal(period=42,
frequency_multipliers=[1, 2, 4],
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class SmoothSeasonalWithNoDriftTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
smooth_seasonal = SmoothSeasonal(period=42,
frequency_multipliers=[1, 2, 4],
allow_drift=False,
observed_time_series=observed_time_series)
# The test harness doesn't like models with no parameters, so wrap with Sum.
return tfp.sts.Sum([smooth_seasonal],
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class SumTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
first_component = LocalLinearTrend(
observed_time_series=observed_time_series, name='first_component')
second_component = LocalLinearTrend(
observed_time_series=observed_time_series, name='second_component')
return Sum(
components=[first_component, second_component],
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class LinearRegressionTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
max_timesteps = 100
num_features = 3
prior = tfd.Sample(tfd.Laplace(0., 1.), sample_shape=[num_features])
# LinearRegression components don't currently take an `observed_time_series`
# argument, so they can't infer a prior batch shape. This means we have to
# manually set the batch shape expected by the tests.
dtype = np.float32
if observed_time_series is not None:
observed_time_series_tensor, _ = (
sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series))
batch_shape = tf.shape(observed_time_series_tensor)[:-2]
dtype = dtype_util.as_numpy_dtype(observed_time_series_tensor.dtype)
prior = tfd.Sample(tfd.Laplace(tf.zeros(batch_shape, dtype=dtype), 1.),
sample_shape=[num_features])
regression = LinearRegression(
design_matrix=np.random.randn(
max_timesteps, num_features).astype(dtype),
weights_prior=prior)
return Sum(components=[regression],
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class SparseLinearRegressionTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
max_timesteps = 100
num_features = 3
# LinearRegression components don't currently take an `observed_time_series`
# argument, so they can't infer a prior batch shape. This means we have to
# manually set the batch shape expected by the tests.
batch_shape = None
dtype = np.float32
if observed_time_series is not None:
observed_time_series_tensor, _ = (
sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series))
batch_shape = tf.shape(observed_time_series_tensor)[:-2]
dtype = dtype_util.as_numpy_dtype(observed_time_series_tensor.dtype)
regression = SparseLinearRegression(
design_matrix=np.random.randn(
max_timesteps, num_features).astype(dtype),
weights_batch_shape=batch_shape)
return Sum(components=[regression],
observed_time_series=observed_time_series)
@test_util.test_all_tf_execution_regimes
class DynamicLinearRegressionTest(test_util.TestCase, _StsTestHarness):
def _build_sts(self, observed_time_series=None):
max_timesteps = 100
num_features = 3
dtype = np.float32
if observed_time_series is not None:
observed_time_series_tensor, _ = (
sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series))
dtype = dtype_util.as_numpy_dtype(observed_time_series_tensor.dtype)
return DynamicLinearRegression(
design_matrix=np.random.randn(
max_timesteps, num_features).astype(dtype),
observed_time_series=observed_time_series)
if __name__ == '__main__':
test_util.main()
| 42.119281
| 108
| 0.718974
|
fc35a32b0bebe233e0c81b1c63a91cc6b2139e0e
| 3,138
|
py
|
Python
|
analysis_sms.py
|
muyu66t/sunhanaixl
|
62d567fdf946736e7f015945658e503bdd0d0f1d
|
[
"Apache-2.0"
] | 26
|
2021-03-05T09:44:57.000Z
|
2022-01-10T11:44:04.000Z
|
analysis_sms.py
|
dangerwolf/SyncXiaomiCloud
|
f8f4f21d289811a738952d096204aca8c4b99e99
|
[
"Apache-2.0"
] | 1
|
2021-07-08T15:39:05.000Z
|
2021-07-08T15:39:05.000Z
|
analysis_sms.py
|
dangerwolf/SyncXiaomiCloud
|
f8f4f21d289811a738952d096204aca8c4b99e99
|
[
"Apache-2.0"
] | 4
|
2021-03-05T08:19:17.000Z
|
2021-11-17T05:44:08.000Z
|
import os,sys,re,json,time
import openpyxl
from openpyxl.styles import Alignment,Border,Side
import difflib
'''
Given an sms.json file synced from Xiaomi Cloud and already formatted,
plus a formatted contacts file keyed by phone number,
write the SMS records out to an Excel workbook.
'''
fname='sms.json'
contact_json_file='phone_dict.json'
def simRatio(s1, s2):  # compare the similarity of two strings; returns a number between 0 and 1
s = difflib.SequenceMatcher(None, s1, s2)
return s.ratio()
def load_file(fname):
ss=open(fname,'r',encoding='utf8').read()
return json.loads(ss)
def get_name_by_phone(phone,phone_dict):
    # Given a number, first check for an exact hit and return the matching name if found.
    # Otherwise, check whether the number is a substring of any stored number and, among
    # those candidates, return the one with the highest similarity ratio.
if phone in phone_dict:
return phone_dict[phone]
res=None
max_r=0
for p in phone_dict:
if p.find(phone)>-1:
r=simRatio(p,phone)
if r>max_r:
max_r=r
res=p
    if not res or max_r < 0.8:  # no match, or the best match is not similar enough: return empty
return ''
return phone_dict[res]
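# Hypothetical examples (phone numbers and names below are made up for illustration):
#   phone_dict = {'8613800138000': 'Alice', '13900139000': 'Bob'}
#   get_name_by_phone('13900139000', phone_dict)  -> 'Bob'    (exact hit)
#   get_name_by_phone('13800138000', phone_dict)  -> 'Alice'  (substring hit, ratio >= 0.8)
#   get_name_by_phone('10086', phone_dict)        -> ''       (no sufficiently similar entry)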
def convert_sms_to_xls(src,tgt,contact=contact_json_file):
sms=load_file(src)
phone_dict=load_file(contact)
xls_file=tgt
res=[]
for s in sms:
txt=s['entry']['snippet']
ts=s['entry']['localTime']/1000
date_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts) )
sender=s['entry']['recipients']
sender=re.sub(r'^\+86|^0086','',sender)
name=get_name_by_phone(sender,phone_dict)
res.append({'name':name,'sender':sender,'time':date_time,'txt':txt,})
open('sms_res.json','w',encoding='utf8').write(json.dumps(res,indent=2,ensure_ascii=False))
book = openpyxl.Workbook()
border = Border(
left=Side(
border_style="thin",
color="FF0000"
),
right=Side(
border_style="thin",
color="FF0000"
),
top=Side(
border_style="thin",
color="FF0000"
),
bottom=Side(
border_style="thin",
color="FF0000"
)
)
    col_names = [chr(i) for i in range(65, 65 + 26)]  # column letters 'A'..'Z'
widths = {'name': 20, 'sender': 22, 'time':21,'txt':110}
col_widths = {}
sheet = book.create_sheet(title='短信')
row = 1
col = 1
id = 1
for t in widths:
sheet.cell(row, col, t).border = border
col += 1
id += 1
        col_widths[col - 1] = widths[t]  # the column name is predefined, so take its width directly
row += 1
for row_data in res:
col = 1
for k in row_data:
try:
sheet.cell(row, col).alignment = Alignment(wrapText=True, vertical='center')
sheet.cell(row, col).border = border
sheet.cell(row, col).value = row_data[k]
except Exception as e:
print(e)
print(row_data[k])
col += 1
row += 1
    for col in col_widths:  # adjust the width of each column
sheet.column_dimensions[col_names[col - 1]].width = col_widths[col]
try:
        del book['Sheet']  # openpyxl creates a default sheet named "Sheet"; delete it if present
except Exception as e:
pass
book.save(xls_file)
return res
if __name__=='__main__':
convert_sms_to_xls(src=fname,tgt='sms.xlsx')
| 29.055556
| 95
| 0.581262
|
6ba972067669406f2e216fa6b00f8816bc75ed4f
| 4,554
|
py
|
Python
|
cluster.py
|
cyjack/TextCluster
|
53204b012b7fc11a42a754720d510fd72b9fa695
|
[
"BSD-3-Clause"
] | 1
|
2020-09-16T06:52:10.000Z
|
2020-09-16T06:52:10.000Z
|
cluster.py
|
cyjack/TextCluster
|
53204b012b7fc11a42a754720d510fd72b9fa695
|
[
"BSD-3-Clause"
] | null | null | null |
cluster.py
|
cyjack/TextCluster
|
53204b012b7fc11a42a754720d510fd72b9fa695
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import argparse
from collections import defaultdict
from tqdm import tqdm
from utils.similar import jaccard
from utils.segmentor import Segmentor
from utils.utils import check_file, ensure_dir, clean_dir, sample_file, get_stop_words, line_counter, Range
def _get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--infile', type=str, default='./data/infile', help='Directory of input file.')
parser.add_argument('--output', type=str, default='./data/output', help='Directory to save output file.')
parser.add_argument('--dict', type=str, default='./data/seg_dict', help='Directory of dict file.')
parser.add_argument('--stop_words', type=str, default='./data/stop_words', help='Directory of stop words.')
parser.add_argument('--sample_number', type=int, default=5, choices=[Range(1)], help='Sample number for each bucket.')
parser.add_argument('--threshold', type=float, default=0.3, choices=[Range(0.0, 1.0)], help='Threshold for matching.')
parser.add_argument('--name_len', type=int, default=9, choices=[Range(2)], help='Filename length.')
parser.add_argument('--name_len_update', type=bool, default=False, help='To update file name length.')
parser.add_argument('--lang', type=str, choices=['cn', 'en'], default='cn', help='Segmentor language setting.')
args = parser.parse_args()
return args
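# Example invocation (paths are just the defaults declared above; adjust to your data):
#   python cluster.py --infile ./data/infile --output ./data/output \
#       --stop_words ./data/stop_words --threshold 0.3 --sample_number 5 --lang cn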
def main():
args = _get_parser()
# preliminary work
check_file(args.infile)
ensure_dir(args.output)
if args.name_len_update:
line_cnt = line_counter(args.infile)
args.name_len = len(str(line_cnt)) + 1
clean_dir(args.output, args.name_len)
# end preliminary work
p_bucket = defaultdict(list)
save_idx = 0
id_name = '{0:0' + str(args.name_len) + 'd}'
# load stop words
stop_words = get_stop_words(args.stop_words) if os.path.exists(args.stop_words) else list()
# load tokenizer
seg = Segmentor(args)
    print('Splitting sentences into different clusters ...')
infile = open(args.infile, 'r', encoding="utf-8")
for line in tqdm(infile):
line = line.rstrip()
is_match = False
seg_list = list(seg.cut(line))
if stop_words:
seg_list = list(filter(lambda x: x not in stop_words, seg_list))
for wd in seg_list:
if is_match:
break
w_bucket = p_bucket[wd]
for bucket in w_bucket:
bucket_path = os.path.join(args.output, bucket)
check_file(bucket_path)
selected = sample_file(bucket_path, args.sample_number)
selected = list(map(lambda x: list(seg.cut(x)), selected))
# remove stop words
if stop_words:
filt_selected = list()
for sen in selected:
sen = list(filter(lambda x: x not in stop_words, sen))
filt_selected.append(sen)
selected = filt_selected
# calculate similarity with each bucket
if all(jaccard(seg_list, cmp_list) > args.threshold for cmp_list in selected):
is_match = True
with open(bucket_path, 'a', encoding='utf-8') as outfile:
outfile.write(line+'\n')
for w in seg_list:
if bucket not in p_bucket[w]:
p_bucket[w].append(bucket)
break
if not is_match:
bucket_name = ('tmp' + id_name).format(save_idx)
bucket_path = os.path.join(args.output, bucket_name)
with open(bucket_path, 'a', encoding='utf-8') as outfile:
outfile.write(line+'\n')
for w in seg_list:
p_bucket[w].append(bucket_name)
save_idx += 1
infile.close()
# sort and rename file
file_list = os.listdir(args.output)
file_list = list(filter(lambda x: x.startswith('tmp'), file_list))
cnt = dict()
for file in file_list:
file_path = os.path.join(args.output, file)
cnt[file] = line_counter(file_path)
sorted_cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
for idx, (file_name, times) in enumerate(sorted_cnt):
origin_path = os.path.join(args.output, file_name)
new_path = os.path.join(args.output, id_name.format(idx))
os.rename(origin_path, new_path)
print('All is well')
if __name__ == '__main__':
main()
| 40.300885
| 122
| 0.614185
|
602a161aaef84e579c79ba21d0e7f1c845b2cdf1
| 4,391
|
py
|
Python
|
becke/GaussianBasis.py
|
humeniuka/becke_multicenter_integration
|
7e06e4b931232b9ccc0fd6dedef7f7ad5a5e4cda
|
[
"MIT"
] | 1
|
2021-11-10T18:25:21.000Z
|
2021-11-10T18:25:21.000Z
|
becke/GaussianBasis.py
|
humeniuka/becke_multicenter_integration
|
7e06e4b931232b9ccc0fd6dedef7f7ad5a5e4cda
|
[
"MIT"
] | null | null | null |
becke/GaussianBasis.py
|
humeniuka/becke_multicenter_integration
|
7e06e4b931232b9ccc0fd6dedef7f7ad5a5e4cda
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
uncontracted cartesian Gaussian-type orbitals
"""
import numpy as np
import numpy.linalg as la
from becke.GaussianIntegrals import norm
def cartesian_shell_ordering(LType):
"""
    ordering of cartesian basis functions in one shell
according to the Gaussian 09 source code in file
'utilnz.F' lines 70053-70064
Parameters
----------
LType : L value of shell, 0: s, 1: p, 2: 6D, etc.
Returns
-------
iterator to labels of cartesian basis functions
"""
# s,p,6d and 10f shells
ordering = [
["S"], # S
["X","Y","Z"], # P
["XX","YY","ZZ", "XY","XZ","YZ"], # 6D
["XXX","YYY","ZZZ","XYY","XXY","XXZ","XZZ","YZZ","YYZ","XYZ"]] # 10F
if LType < 4:
for XLab in ordering[LType]:
yield XLab
return
# higher shells: g, h, i, ...
XLab = ['' for i in range(0, 100)]
for L in range(0, LType+1):
for M in range(0, LType-L+1):
for I in range(1, L+1):
XLab[I] = 'X'
for I in range(1, M+1):
XLab[L+I] = 'Y'
for I in range(1, LType-L-M+1):
XLab[L+M+I] = 'Z'
yield "".join(XLab[1:LType+1])
class UncontractedBasisSet:
def __init__(self, nbfs, exponents, shell_types, shell_coords, shell_atoms):
"""
initialize basis for `nbfs` primitive Gaussian functions
Parameters
----------
nbfs : int, number of basis functions
exponents : 1d numpy array of floats, list of exponents of Gaussians
shell_types : 1d numpy array of integers,
angular momentum of each shell (0: s, 1: p, 2: d, ...)
shell_coords: centers of basis functions,
shell_coords[:,i] are the cartesian coordinates for the i-th shell
shell_atoms : map between shell index and atom index
"""
assert np.all(shell_types >= 0), "Only cartesian basis functions (6D, 10F, ...) are supported!"
self.nbfs = nbfs
self.exponents = np.zeros(nbfs, dtype=float)
self.powers = np.zeros((3,nbfs), dtype=int)
self.centers = np.zeros((3,nbfs), dtype=float)
# index of atomic center to which the basis function belongs
self.atom_ids = np.zeros(nbfs, dtype=int)
# index of angular momentum shell
self.shell_index = np.zeros(nbfs, dtype=int)
ibf = 0 # `ibf` counts cartesian basis functions
# `ish` counts the shells
for ish, (alpha, ltype) in enumerate(zip(exponents, shell_types)):
# enumerate cartesian basis functions in this shell in the same order
# used by Gaussian 09
for powstr in cartesian_shell_ordering(ltype):
self.exponents[ibf] = alpha
self.powers[:,ibf] = powstr.count('X'), powstr.count('Y'), powstr.count('Z')
self.centers[:,ibf] = shell_coords[:,ish]
#
self.atom_ids[ibf] = shell_atoms[ish]
self.shell_index[ibf] = ish
ibf += 1
assert ibf == nbfs
def wavefunction(self, orb, x,y,z):
"""
Parameters
----------
orb : MO coefficients, orb[i] is the coefficient of the i-th basis function
x,y,z: numpy grids with x-,y- and z-coordinates at which the orbital should
be evaluated
Returns
-------
wfn: amplitude of orbital at the grid points
"""
wfn = 0j*x
assert len(orb) == self.nbfs
for i in range(0, self.nbfs):
wfn += orb[i] * wavefunction(self.exponents[i], self.powers[:,i], self.centers[:,i], x,y,z)
return wfn
def wavefunction(alpha, lmn, A, x,y,z):
"""
evaluate a cartesian Gaussian basis function on a grid
"""
l,m,n = lmn
nrm = norm(alpha,(l,m,n))
r2 = (x-A[0])**2 + (y-A[1])**2 + (z-A[2])**2
wfn = nrm*(x-A[0])**l * (y-A[1])**m * (z-A[2])**n * np.exp(-alpha*r2)
return wfn
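# Editor's illustration (not part of the original module): evaluating a single
# normalized p_x-type cartesian Gaussian on a small grid; the exponent and
# grid spacing are arbitrary demonstration values.
def _example_single_gaussian():
    grid = np.linspace(-1.0, 1.0, 5)
    x, y, z = np.meshgrid(grid, grid, grid)
    alpha = 0.5            # arbitrary exponent
    lmn = (1, 0, 0)        # powers of x, y and z -> p_x
    A = np.zeros(3)        # centered at the origin
    return wavefunction(alpha, lmn, A, x, y, z)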
if __name__ == "__main__":
# print ordering of basis functions in each shell
for LType in [0,1,2,3,4,5,6,7]:
print( "Shell Type: %s" % LType )
print( list(cartesian_shell_ordering(LType)) )
| 35.699187
| 103
| 0.535413
|
39e97e5fb5aa203a246f0e6f20206713946c4eac
| 2,677
|
py
|
Python
|
eas_controller/queue_management/schedule_all_waiting_tasks.py
|
dcf21/plato-wp36-v2
|
9e7a215fbc0c3a1ce38369cbb54ffb7f84a9d8d7
|
[
"MIT"
] | null | null | null |
eas_controller/queue_management/schedule_all_waiting_tasks.py
|
dcf21/plato-wp36-v2
|
9e7a215fbc0c3a1ce38369cbb54ffb7f84a9d8d7
|
[
"MIT"
] | null | null | null |
eas_controller/queue_management/schedule_all_waiting_tasks.py
|
dcf21/plato-wp36-v2
|
9e7a215fbc0c3a1ce38369cbb54ffb7f84a9d8d7
|
[
"MIT"
] | null | null | null |
#!../../data/datadir_local/virtualenv/bin/python3
# -*- coding: utf-8 -*-
# schedule_all_waiting_tasks.py
"""
Schedule all tasks in the database which have not yet been queued
"""
import argparse
import logging
import os
import time
from plato_wp36 import settings, task_database, task_queues
def schedule_jobs():
"""
Schedule all tasks in the database which have not yet been queued.
:return:
None
"""
    # Open a connection to the task database
task_db = task_database.TaskDatabaseConnection()
# Open connection to the message queue
message_bus = task_queues.TaskQueue()
# Fetch list of all the tasks to schedule
# This is all tasks which do not have an existing scheduling attempt, and which also do not require any file
# products which have not passed QC.
task_db.conn.execute("""
SELECT t.taskId, ett.taskName
FROM eas_task t
INNER JOIN eas_task_types ett on t.taskTypeId = ett.taskTypeId
WHERE
NOT EXISTS (SELECT 1 FROM eas_scheduling_attempt x WHERE x.taskId = t.taskId)
AND
NOT EXISTS (SELECT 1 FROM eas_task_input y INNER JOIN eas_product z on y.inputId = z.productId
WHERE y.taskId = t.taskId AND
NOT EXISTS (SELECT 1 FROM eas_product_version v WHERE v.productId=z.productId AND v.passedQc))
ORDER BY t.taskId;
""")
tasks = task_db.conn.fetchall()
# Schedule each job in turn
for item in tasks:
queue_name = item['taskName']
task_id = item['taskId']
logging.info("Scheduling {:6d} - {:s}".format(task_id, queue_name))
attempt_id = task_db.execution_attempt_register(task_id=task_id)
message_bus.queue_publish(queue_name=queue_name, message=attempt_id)
# Close connection
message_bus.close()
# Commit database
task_db.commit()
task_db.close_db()
if __name__ == "__main__":
# Read command-line arguments
parser = argparse.ArgumentParser(description=__doc__)
args = parser.parse_args()
# Fetch testbench settings
settings = settings.Settings()
# Set up logging
log_file_path = os.path.join(settings.settings['dataPath'], 'plato_wp36.log')
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
handlers=[
logging.FileHandler(log_file_path),
logging.StreamHandler()
])
logger = logging.getLogger(__name__)
logger.info(__doc__.strip())
# Reschedule tasks
while True:
schedule_jobs()
time.sleep(5)
| 30.420455
| 112
| 0.658947
|
249fb771b33317eccf1e71f7a906da1c1633b9ab
| 36
|
py
|
Python
|
easydata/exceptions.py
|
easydatapy/easydata
|
5e76bf7fc9f368065a82ccc99fca54b17f7e91bd
|
[
"BSD-3-Clause"
] | 6
|
2020-09-06T19:06:01.000Z
|
2020-09-09T23:19:21.000Z
|
easydata/exceptions.py
|
sitegroove/easydata
|
0e347990027b9f6cc06a1072511197f1adb50e5c
|
[
"BSD-3-Clause"
] | null | null | null |
easydata/exceptions.py
|
sitegroove/easydata
|
0e347990027b9f6cc06a1072511197f1adb50e5c
|
[
"BSD-3-Clause"
] | 1
|
2021-07-22T17:59:20.000Z
|
2021-07-22T17:59:20.000Z
|
class DropItem(Exception):
pass
| 12
| 26
| 0.722222
|
49fb5268d8f0effb519253997c5a0c6f38688258
| 4,788
|
py
|
Python
|
quickstart_templates/Good usage of blacklist.py
|
fierysolid/InstaPy
|
a944520b107e6501e1fc41255049ff330f7a1a42
|
[
"MIT"
] | null | null | null |
quickstart_templates/Good usage of blacklist.py
|
fierysolid/InstaPy
|
a944520b107e6501e1fc41255049ff330f7a1a42
|
[
"MIT"
] | null | null | null |
quickstart_templates/Good usage of blacklist.py
|
fierysolid/InstaPy
|
a944520b107e6501e1fc41255049ff330f7a1a42
|
[
"MIT"
] | null | null | null |
"""
This template is written by @jeremycjang
What does this quickstart script aim to do?
- Here's the configuration I use the most.
NOTES:
- Read the incredibly amazing advice & ideas from my experience at the end of this file :>
"""
import time
import os
from tempfile import gettempdir
from instapy import InstaPy
from instapy.util import smart_run
from selenium.common.exceptions import NoSuchElementException
insta_username = 'username'
insta_password = 'password'
# get a session!
session = InstaPy(username=insta_username,
password=insta_password,
use_firefox=True,
page_delay=20,
bypass_suspicious_attempt=False,
nogui=False,
multi_logs=True)
# let's go! :>
with smart_run(session):
# settings
""" I don't use relationship bounds, but messed with it before and had some arbitrary numbers here
"""
session.set_relationship_bounds(enabled=False,
potency_ratio=-1.21,
delimit_by_numbers=True,
max_followers=99999999,
max_following=5000,
min_followers=70,
min_following=10)
""" Create a blacklist campaign to avoid bot interacting with users again. I never turn this off
"""
session.set_blacklist(enabled=True, campaign='blacklist')
session.set_do_like(enabled=True, percentage=100)
session.set_do_comment(enabled=True, percentage=100)
session.set_comments([':thumbsup:', ':raising_hands:', 'comment3'], media='Photo')
session.set_comments(['comment4', ':smiling_face_with_sunglasses: :thumbsup:', ':comment6'], media='Video')
#session.set_dont_include(['friend1', 'friend2', 'friend3'])
session.set_dont_like(['#naked', '#sex', '#fight'])
session.set_user_interact(amount=1, randomize=False, percentage=50)
session.set_simulation(enabled=True)
# activity
""" First follow user followers leaves comments on these user's posts...
"""
session.follow_user_followers(['user1', 'user2', 'user3'], amount=125, randomize=False, interact=True, sleep_delay=600)
""" Second follow user follows doesn't comment on users' posts...
"""
session.follow_user_followers(['user4', 'user5'], amount=50, randomize=False, interact=False, sleep_delay=600)
""" Unfollow amount intentionally set higher than follow amount to catch accounts that were not unfollowed last run.
Blacklist set to false as this seems to allow more users to get unfollowed for whatever reason.
"""
session.set_blacklist(enabled=False, campaign='blacklist')
session.unfollow_users(amount=1000, InstapyFollowed=(True,"all"), style="FIFO", unfollow_after=None, sleep_delay=600)
"""
EXTRA NOTES:
1-) A blacklist is used and never turned off so as to never follow the same user twice (unless their username is changed)
2-) The program is set to follow 475 people because this is the largest amount I've found so far that can be followed, commented on and unfollowed successfully within 24 hours. This can be customized of course, but please let me know if anyone's found a larger amount that can be cycled in 24 hours~
3-) Running this program every day, the program never actually follows a full 475 people because it doesn't grab enough links or grabs the links of people that have been followed already.
4-) I still have never observed the `media` parameter within `set comments` do anything, so a random comment from the 6 gets picked regardless of the media type
5-) For unknown reasons, the program will always prematurely end the unfollow portion without unfollowing everyone. More on this later
6-) I use two ```follow_user_followers``` sessions because I believe the comments I use are only well-received by the followers of users in the first ```follow_user_followers``` action.
7-) Linux PRO-tip: This is a really basic command line syntax that I learned yesterday, but less technical people may not have known about it. Using `&&` in the terminal, you can chain InstaPy programs! If you send:
```
python InstaPyprogram1 && python InstaPyprogram2
```
The shell will interpret it as "Run the InstaPyprogram1, then once it successfully completes immediately run InstaPyprogram2".
Knowing this, my workaround for the premature unfollow actions ending is to chain my template with another program that only has the unfollow code. There's no limit to how many programs you can chain with `&&`, so you can use your imagination on what can be accomplished :)
Hope this helps! Open to any feedback and improvements anyone can suggest ^.^
"""
| 44.333333
| 299
| 0.70447
|
3f2b8eda8c8a56a68ceeddb8f151c1a565e3be57
| 1,355
|
py
|
Python
|
delira/training/callbacks/abstract_callback.py
|
NKPmedia/delira
|
a10227e30c14c6507a1790813e53572e0d841c21
|
[
"BSD-2-Clause"
] | null | null | null |
delira/training/callbacks/abstract_callback.py
|
NKPmedia/delira
|
a10227e30c14c6507a1790813e53572e0d841c21
|
[
"BSD-2-Clause"
] | null | null | null |
delira/training/callbacks/abstract_callback.py
|
NKPmedia/delira
|
a10227e30c14c6507a1790813e53572e0d841c21
|
[
"BSD-2-Clause"
] | null | null | null |
class AbstractCallback(object):
"""
Implements abstract callback interface.
All callbacks should be derived from this class
See Also
--------
:class:`AbstractNetworkTrainer`
"""
def __init__(self, *args, **kwargs):
"""
Parameters
----------
*args :
positional arguments
**kwargs :
keyword arguments
"""
pass
def at_epoch_begin(self, trainer, **kwargs):
"""
Function which will be executed at begin of each epoch
Parameters
----------
trainer : :class:`AbstractNetworkTrainer`
**kwargs :
additional keyword arguments
Returns
-------
dict
modified trainer attributes, where the name must correspond to the
trainer's attribute name
"""
return {}
def at_epoch_end(self, trainer, **kwargs):
"""
Function which will be executed at end of each epoch
Parameters
----------
trainer : :class:`AbstractNetworkTrainer`
**kwargs :
additional keyword arguments
Returns
-------
dict
modified trainer attributes, where the name must correspond to the
trainer's attribute name
"""
return {}
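# Editor's illustration (not part of delira): a minimal callback following the
# interface above. The returned key "stop_training" is hypothetical and only
# demonstrates the convention that returned keys must match trainer attribute
# names.
class _ExampleEpochLimitCallback(AbstractCallback):
    def __init__(self, max_epochs, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_epochs = max_epochs
        self._epochs_seen = 0

    def at_epoch_end(self, trainer, **kwargs):
        self._epochs_seen += 1
        # each returned entry overwrites the trainer attribute of the same name
        return {"stop_training": self._epochs_seen >= self.max_epochs}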
| 21.854839
| 78
| 0.526937
|
9bbcd840421e5363aa5a98a3e56541d3776c2f96
| 729
|
py
|
Python
|
tests/src/CRC/navigate_to_crc_and_click_on_logout.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
tests/src/CRC/navigate_to_crc_and_click_on_logout.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | 2
|
2022-02-01T00:55:12.000Z
|
2022-03-29T22:29:09.000Z
|
tests/src/CRC/navigate_to_crc_and_click_on_logout.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
from Data.parameters import Data
from reuse_func import GetData
class Logout_function():
def __init__(self,driver):
self.driver = driver
def test_logout(self):
self.p = GetData()
self.driver.implicitly_wait(20)
self.driver.find_element_by_xpath(Data.hyper).click()
self.p.page_loading(self.driver)
self.driver.find_element_by_id(Data.logout).click()
self.p.page_loading(self.driver)
if "Log in to cQube" in self.driver.title:
print("login page is displayed")
else:
print("logout is not working")
data = GetData()
data.login_cqube(self.driver)
self.p.page_loading(self.driver)
data.navigate_to_crc_report()
| 30.375
| 60
| 0.662551
|
a6820298871d6494d6e543fd1138a7af9f754b42
| 198
|
py
|
Python
|
organisatie/views.py
|
markvangeffen/public_dashboard
|
81521f93ab6337e4f90f58722811e0c8358174a8
|
[
"MIT"
] | null | null | null |
organisatie/views.py
|
markvangeffen/public_dashboard
|
81521f93ab6337e4f90f58722811e0c8358174a8
|
[
"MIT"
] | null | null | null |
organisatie/views.py
|
markvangeffen/public_dashboard
|
81521f93ab6337e4f90f58722811e0c8358174a8
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
class OrganisatieView(TemplateView):
template_name = 'organisatie/organisatie.html'
| 28.285714
| 50
| 0.813131
|
e6adc6d768e40d5d4c5f11c9fc142ce3bbeeb307
| 1,148
|
py
|
Python
|
app/tests/core/storage/test_redis.py
|
jmgilman/fapi
|
9a63ed18bd119efa4d1e93f1c9d6ded22e6d39d2
|
[
"MIT"
] | 1
|
2022-01-24T22:28:22.000Z
|
2022-01-24T22:28:22.000Z
|
app/tests/core/storage/test_redis.py
|
jmgilman/bapi
|
9a63ed18bd119efa4d1e93f1c9d6ded22e6d39d2
|
[
"MIT"
] | null | null | null |
app/tests/core/storage/test_redis.py
|
jmgilman/bapi
|
9a63ed18bd119efa4d1e93f1c9d6ded22e6d39d2
|
[
"MIT"
] | null | null | null |
from unittest import mock
from app.core import settings
from app.core.storage.redis import RedisConfig, RedisStorage
import redis
@mock.patch("bdantic.models.BeancountFile.parse")
@mock.patch("bdantic.models.BeancountFile.decompress")
@mock.patch("beancount.loader.load_string")
@mock.patch("redis.StrictRedis.pubsub")
@mock.patch("redis.StrictRedis.get")
def test_load(get, pubsub, loader, decompress, parse):
contents = ([], [], {})
ps = mock.Mock(redis.client.PubSub)
pubsub.return_value = ps
get.return_value = b"test"
loader.return_value = contents
parse.return_value = "parsed"
stgs = settings.Settings()
stgs.redis = RedisConfig()
storage = RedisStorage(stgs)
result = storage.load()
assert result == "parsed"
ps.subscribe.assert_called_once_with(stgs.redis.channel)
get.assert_called_once_with(stgs.redis.key)
loader.assert_called_once_with("test")
parse.assert_called_once_with(contents)
# Cached
get.return_value = "bytes"
stgs.redis.cached = True
storage = RedisStorage(stgs)
result = storage.load()
decompress.assert_called_once_with("bytes")
| 28
| 60
| 0.72561
|
3df4bea014f3e1b4b0fe4fc8a6bc5e92fa411701
| 957
|
py
|
Python
|
csv_sampler.py
|
tyhunt99/random
|
d50b47b7bba065a15cb36675ce2a8d0954f04281
|
[
"MIT"
] | null | null | null |
csv_sampler.py
|
tyhunt99/random
|
d50b47b7bba065a15cb36675ce2a8d0954f04281
|
[
"MIT"
] | null | null | null |
csv_sampler.py
|
tyhunt99/random
|
d50b47b7bba065a15cb36675ce2a8d0954f04281
|
[
"MIT"
] | null | null | null |
'''
This will read CSV_IN and loop over it until SAMPLE_SIZE is reached and
output it to CSV_OUT. Be sure to specify the delimiter and quote char.
It will maintain these values in the sample output csv.
Notes:
* This will truncate the output file or create it if needed
'''
import csv
CSV_IN = 'OUTPUT_1528387783_1734714987.csv'
CSV_OUT = 'sample_output.csv'
SAMPLE_SIZE = 1
CSV_DELIMITER = ','
CSV_QUOTECHAR = '"'
with open(CSV_IN) as infile, open(CSV_OUT, 'w') as outfile:
rownum = 0
reader = csv.reader(
infile,
delimiter=CSV_DELIMITER,
quotechar=CSV_QUOTECHAR,
)
writer = csv.writer(
outfile,
delimiter=CSV_DELIMITER,
quotechar=CSV_QUOTECHAR,
quoting=csv.QUOTE_ALL,
)
for row in reader:
        if rownum <= SAMPLE_SIZE:  # '<=' writes SAMPLE_SIZE + 1 rows (header plus SAMPLE_SIZE data rows when the CSV has a header)
writer.writerow(row)
rownum += 1
else: # if the desired sample size has been achieved exit
break
| 24.538462
| 71
| 0.654127
|
c0810d807d15f9124bbf0cbe44b504a70fb436d4
| 1,401
|
py
|
Python
|
scripts/discourse_1739.py
|
gmatteo/awesome-panel
|
7eb6965f4b3a7eca08c07561e631e5beb189ffd3
|
[
"Apache-2.0"
] | 179
|
2019-12-04T14:54:53.000Z
|
2022-03-30T09:08:38.000Z
|
scripts/discourse_1739.py
|
hbueno/awesome-panel
|
fb27bcaf265cef1278cfa0c78799fbbf6c9a6834
|
[
"Apache-2.0"
] | 62
|
2019-12-14T16:51:28.000Z
|
2022-03-19T18:47:12.000Z
|
scripts/discourse_1739.py
|
hbueno/awesome-panel
|
fb27bcaf265cef1278cfa0c78799fbbf6c9a6834
|
[
"Apache-2.0"
] | 35
|
2019-12-08T13:19:53.000Z
|
2022-03-25T10:33:02.000Z
|
import numpy as np
import pandas as pd
import panel as pn
import panel.widgets as pnw
from matplotlib.backends.backend_agg import FigureCanvas
from matplotlib.figure import Figure
DATA_URL = (
"https://raw.githubusercontent.com/LuisM78/Occupancy-detection-data/master/datatraining.txt"
)
data = pd.read_csv(DATA_URL)
data["date"] = data.date.astype("datetime64[ns]")
data = data.set_index("date")
variable = pnw.RadioButtonGroup(name="variable", value="Temperature", options=list(data.columns))
window = pnw.IntSlider(name="window", value=10, start=1, end=60)
def mpl_plot(avg, highlight):
fig = Figure()
FigureCanvas(fig) # not needed in mpl >= 3.1
ax = fig.add_subplot()
avg.plot(ax=ax)
if len(highlight):
highlight.plot(style="o", ax=ax)
return fig
def find_outliers(variable="Temperature", window=30, sigma=10, view_fn=mpl_plot):
avg = data[variable].rolling(window=window).mean()
residual = data[variable] - avg
std = residual.rolling(window=window).std()
outliers = np.abs(residual) > std * sigma
return view_fn(avg, avg[outliers])
@pn.depends(variable, window)
def reactive_outliers(variable, window):
return find_outliers(variable, window, 10)
widgets = pn.Column("<br>\n# Room occupancy", variable, window)
occupancy = pn.Row(reactive_outliers, widgets)
occupancy.servable()
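# Usage note (editor's addition): a ``.servable()`` app like this one is
# normally launched with Panel's CLI, e.g. ``panel serve discourse_1739.py
# --show``; calling ``pn.serve(occupancy)`` when running the file directly
# would work as well (commands shown for illustration).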
| 30.456522
| 98
| 0.700214
|
f0212de2ceeb06d53133c239dedd8509261292cc
| 70
|
py
|
Python
|
thetangle/__init__.py
|
kaalam/thetangle
|
4c4877ebc3c6f8cce86f1a43681359c16a51e2c9
|
[
"MIT"
] | 1
|
2021-11-20T12:30:02.000Z
|
2021-11-20T12:30:02.000Z
|
thetangle/__init__.py
|
kaalam/thetangle
|
4c4877ebc3c6f8cce86f1a43681359c16a51e2c9
|
[
"MIT"
] | null | null | null |
thetangle/__init__.py
|
kaalam/thetangle
|
4c4877ebc3c6f8cce86f1a43681359c16a51e2c9
|
[
"MIT"
] | null | null | null |
__version__ = '0.5.3'
from thetangle.TheTangle import TangleExplorer
| 17.5
| 46
| 0.8
|
ffa637531b28f5bc547f58662450a3f37531f1e7
| 2,747
|
py
|
Python
|
examples/crontabReader.py
|
Davoodeh/cron-descriptor
|
b15f435d10e6497a0647fdaca51539931f2eddc5
|
[
"MIT"
] | 98
|
2016-01-20T00:52:11.000Z
|
2022-03-10T16:27:31.000Z
|
examples/crontabReader.py
|
Davoodeh/cron-descriptor
|
b15f435d10e6497a0647fdaca51539931f2eddc5
|
[
"MIT"
] | 37
|
2016-02-03T17:26:39.000Z
|
2022-03-23T22:44:32.000Z
|
examples/crontabReader.py
|
Davoodeh/cron-descriptor
|
b15f435d10e6497a0647fdaca51539931f2eddc5
|
[
"MIT"
] | 26
|
2016-01-19T16:24:02.000Z
|
2022-02-15T20:22:43.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Adam Schubert
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
try:
from cron_descriptor import Options, ExpressionDescriptor
except ImportError:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    print('\033[1mFailed to import cron_descriptor; maybe try "pip install cron-descriptor"?\033[0m')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
raise
class CrontabReader(object):
"""
    Simple example reading /etc/crontab
"""
rex = re.compile(r"^(\S{1,3}\s+\S{1,3}\s+\S{1,3}\s+\S{1,3}\s+\S{1,3}).+$")
def __init__(self, cronfile):
"""Initialize CrontabReader
Args:
cronfile: Path to cronfile
Returns:
None
"""
options = Options()
options.day_of_week_start_index_zero = False
options.use_24hour_time_format = True
with open(cronfile) as f:
for line in f.readlines():
parsed_line = self.parse_cron_line(line)
if parsed_line:
print("{} -> {}".format(parsed_line, ExpressionDescriptor(parsed_line, options)))
def parse_cron_line(self, line):
"""Parses crontab line and returns only starting time string
Args:
line: crontab line
Returns:
Time part of cron line
"""
stripped = line.strip()
if stripped and stripped.startswith('#') is False:
rexres = self.rex.search(stripped)
if rexres:
return ' '.join(rexres.group(1).split())
return None
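# Editor's illustration: what parse_cron_line() is expected to return for a
# typical /etc/crontab entry (the sample line below is made up).
def _example_parse_cron_line():
    # bypass __init__, which immediately reads and prints a crontab file
    reader = CrontabReader.__new__(CrontabReader)
    line = "17 *  * * *   root    cd / && run-parts --report /etc/cron.hourly"
    return reader.parse_cron_line(line)   # expected: '17 * * * *'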
CrontabReader('/etc/crontab')
| 35.675325
| 101
| 0.620677
|
2e4edb9f72bdcba9a02b41fc273d54b577e4c153
| 717
|
py
|
Python
|
tests/test_server.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 4
|
2022-02-17T19:47:52.000Z
|
2022-02-17T20:11:06.000Z
|
tests/test_server.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 2
|
2022-03-26T00:07:05.000Z
|
2022-03-30T21:20:00.000Z
|
tests/test_server.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 1
|
2022-02-18T13:33:00.000Z
|
2022-02-18T13:33:00.000Z
|
import pytest
from sanic import Sanic, response
from labfunctions import server
from labfunctions.conf.server_settings import settings
from labfunctions.db.nosync import AsyncSQL
def test_server_app():
app = server.create_app(settings, ["events"])
assert isinstance(app, Sanic)
# assert isinstance(server.app.ctx.db, AsyncSQL)
def test_server_init_bp():
app = Sanic("test_app_unique")
server.init_blueprints(app, ["events", "workflows", "projects", "history"])
assert len(app.blueprints) == 4
@pytest.mark.asyncio
async def test_server_status(sanic_app):
req, res = await sanic_app.asgi_client.get("/status")
assert res.status == 200
assert res.json["msg"] == "We are ok"
| 25.607143
| 79
| 0.729428
|
4538b12df0c5e900fc6534d26c13abcb5a726739
| 1,248
|
py
|
Python
|
codes/Lib/site-packages/openpyxl/drawing/__init__.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | 8
|
2016-05-27T12:13:16.000Z
|
2019-08-05T13:49:11.000Z
|
codes/Lib/site-packages/openpyxl/drawing/__init__.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | 3
|
2019-07-29T09:47:34.000Z
|
2019-07-29T09:47:35.000Z
|
flask/lib/python3.6/site-packages/openpyxl/drawing/__init__.py
|
JOFLIX/grapevines
|
34576e01184570d79cc140b42ffb71d322132da6
|
[
"MIT",
"Unlicense"
] | 1
|
2021-07-21T00:07:30.000Z
|
2021-07-21T00:07:30.000Z
|
from __future__ import absolute_import
# Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from .drawing import *
| 48
| 79
| 0.778846
|
8e5306ded8fee28b336607448f5a172cc5b8e97e
| 397
|
py
|
Python
|
cartpole_trainer.py
|
djdnx/DQN
|
4be919d51d0a3e008bf41c3afcc76170056b9b0c
|
[
"MIT"
] | null | null | null |
cartpole_trainer.py
|
djdnx/DQN
|
4be919d51d0a3e008bf41c3afcc76170056b9b0c
|
[
"MIT"
] | null | null | null |
cartpole_trainer.py
|
djdnx/DQN
|
4be919d51d0a3e008bf41c3afcc76170056b9b0c
|
[
"MIT"
] | null | null | null |
"""File demonstrating how to train an agent to solve the 'CartPole-v0' gym environment."""
import gym
from DQN import dqn_trainer
env = gym.make("CartPole-v0")
agent = dqn_trainer.DQNAgent(env, hidden_architecture=(["relu", 64],))
agent.learn("cartpole_tf_model/q_model", "cartpole_tensorboard_dir", 400,
prioritised_experience_replay=True, num_annealing_steps=10000)
env.close()
| 28.357143
| 90
| 0.753149
|
0e0a5fe1236daac7710ff7d2cc4c8e74109167fd
| 8,865
|
py
|
Python
|
stable_projects/fMRI_dynamics/Kong2021_pMFM/part2_pMFM_control_analysis/Schaefer100_parcellation/scripts/CBIG_pMFM_step1_training_Schaefer100.py
|
marielacour81/CBIG
|
511af756c6ddabbd3a9681ce3514b79ef5aaaf3f
|
[
"MIT"
] | 1
|
2021-12-18T09:35:59.000Z
|
2021-12-18T09:35:59.000Z
|
stable_projects/fMRI_dynamics/Kong2021_pMFM/part2_pMFM_control_analysis/Schaefer100_parcellation/scripts/CBIG_pMFM_step1_training_Schaefer100.py
|
marielacour81/CBIG
|
511af756c6ddabbd3a9681ce3514b79ef5aaaf3f
|
[
"MIT"
] | null | null | null |
stable_projects/fMRI_dynamics/Kong2021_pMFM/part2_pMFM_control_analysis/Schaefer100_parcellation/scripts/CBIG_pMFM_step1_training_Schaefer100.py
|
marielacour81/CBIG
|
511af756c6ddabbd3a9681ce3514b79ef5aaaf3f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import time
import torch
import CBIG_pMFM_basic_functions as fc
import warnings
def get_init(myelin_data, gradient_data, highest_order, init_para):
'''
    This function calculates the initial parameterized coefficients by
    fitting the myelin and RSFC-gradient basis to the initial parameters.
'''
n_node = myelin_data.shape[0]
amatrix = np.zeros((n_node, highest_order + 1))
bmatrix = np.zeros((n_node, highest_order + 1))
for i in range(highest_order + 1):
amatrix[:, i] = myelin_data**(i)
bmatrix[:, i] = gradient_data**(i)
cmatrix = np.hstack((amatrix, bmatrix[:, 1:highest_order + 1]))
para = np.linalg.inv(cmatrix.T @ cmatrix) @ cmatrix.T @ init_para
return para, cmatrix
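# Editor's illustration (synthetic numbers, not part of the original script):
# with highest_order = 1 each regional parameter p_i is parameterized as
#     p_i = a0 + a1 * myelin_i + b1 * gradient_i
# and get_init() recovers (a0, a1, b1) from the normal equations.
def _example_get_init():
    myelin = np.array([0.1, 0.2, 0.3, 0.5])
    gradient = np.array([0.4, 0.1, 0.3, 0.2])
    target = 0.5 + 2.0 * myelin - 1.0 * gradient   # built from known coefficients
    coeff, basis_matrix = get_init(myelin, gradient, 1, target)
    return coeff                                   # approximately [0.5, 2.0, -1.0]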
def CBIG_mfm_optimization_desikan_main(gpu_index=0, random_seed=1):
'''
This function is to implement the optimization processes of mean
field model.
The objective function is the summation of FC correlation cost and
FCD KS statistics cost.
The optimization process is highly automatic and generate 500
candidate parameter sets for
main results.
Args:
gpu_index: index of gpu used for optimization
random_seed: random seed for optimization
output_path: output directory for saving optimized model
parameters
Returns:
None
'''
# Setting random seed and GPU
torch.cuda.set_device(gpu_index)
random_seed_cuda = random_seed
random_seed_np = random_seed
torch.manual_seed(random_seed_cuda)
rng = np.random.Generator(np.random.PCG64(random_seed_np))
# Create output folders
output_path = '../output/step1_training_results/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
# Initializing input parameters
highest_order = 1
N = 3 * (2 * highest_order + 1) + 1
myelin_data = fc.csv_matrix_read(
'../../../input/Schaefer100_input/myelin.csv')
myelin_data = myelin_data[:, 0]
gradient_data = fc.csv_matrix_read(
'../../../input/Schaefer100_input/rsfc_gradient.csv')
gradient_data = gradient_data[:, 0]
n_node = myelin_data.shape[0]
dim = n_node * 3 + 1
search_range = np.zeros((dim, 2))
search_range[0:n_node, :] = [0, 1]
search_range[n_node:n_node * 2, :] = [0, 0.5]
search_range[n_node * 2, :] = [1, 10]
search_range[n_node * 2 + 1:dim, :] = [0.0005, 0.01]
init_para = rng.uniform(0, 1, dim) * (
search_range[:, 1] - search_range[:, 0]) + search_range[:, 0]
start_point_w, template_mat = get_init(myelin_data, gradient_data,
highest_order, init_para[0:n_node])
start_point_i, template_mat = get_init(myelin_data, gradient_data,
highest_order,
init_para[n_node:n_node * 2])
start_point_sigma, template_mat = get_init(myelin_data, gradient_data,
highest_order,
init_para[n_node * 2 + 1:dim])
# Initializing childrens
xmean = np.zeros(N)
xmean[0:2 * highest_order + 1] = start_point_w
xmean[2 * highest_order + 1:2 * (2 * highest_order + 1)] = start_point_i
xmean[2 * (2 * highest_order + 1)] = init_para[2 * n_node]
xmean[2 * (2 * highest_order + 1) + 1:N] = start_point_sigma
# Initializing optimization hyper-parameters
sigma = 0.15
sigmaS = 0.15
stoppoint = 0.4
maxloop = 500
n_dup = 3
# CMA-ES parameters setting
Lambda = 500
mu = 40
weights = np.log(mu + 1 / 2) - np.log(np.arange(1, mu + 1))
weights = weights / np.sum(weights)
mueff = 1 / np.sum(weights**2)
# Strategy parameter setting: adaptation
cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
cs = (mueff + 2) / (N + mueff + 5)
c1 = 2 / ((N + 1.3)**2 + mueff)
cmu = np.minimum(1 - c1,
2 * (mueff - 2 + 1 / mueff) / ((N + 2)**2 + mueff))
damps = 1 + 2 * np.maximum(0, np.sqrt((mueff - 1) / (N + 1)) - 1) + cs
    # Initializing dynamic strategy parameters and constants
pc = np.zeros(N)
ps = np.zeros(N)
B = np.eye(N)
D = np.zeros(N)
D[0:2 * highest_order + 1] = start_point_w[0] / 2
D[2 * highest_order + 1:2 * (2 * highest_order + 1)] = start_point_i[0] / 2
D[2 * (2 * highest_order + 1)] = 0.4
D[2 * (2 * highest_order + 1) + 1:N] = 0.001 / 2
C = np.dot(np.dot(B, np.diag(np.power(D, 2))), B.T)
invsqrtC = np.dot(np.dot(B, np.diag(np.power(D, -1))), B.T)
    chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))  # E||N(0, I)||; '**' (power), not '^' (XOR)
# Evolution loop
countloop = 0
arx = np.zeros([N, Lambda])
input_para = np.zeros((dim, Lambda))
xmin = np.zeros([N + 3, maxloop])
stop_count = 0
while countloop < maxloop:
start_time = time.time()
# Generating lambda offspring
arx[:, 0] = xmean
j = 0
while j < Lambda:
arx[:, j] = xmean + sigma * np.dot(B, (D * rng.standard_normal(N)))
input_para[0:n_node, j] = template_mat @ arx[0:2 * highest_order +
1, j]
input_para[n_node:2 * n_node,
j] = template_mat @ arx[2 * highest_order + 1:2 *
(2 * highest_order + 1), j]
input_para[2 * n_node:2 * n_node +
1, j] = arx[2 * (2 * highest_order + 1), j]
input_para[2 * n_node + 1:dim, j] = template_mat @ arx[2 * (
2 * highest_order + 1) + 1:N, j]
if (input_para[:, j] < search_range[:, 0]).any() or (
input_para[:, j] > search_range[:, 1]).any():
j = j - 1
j = j + 1
# Calculating costs of offspring
total_cost, fc_cost, fcd_cost = fc.CBIG_combined_cost_train(
input_para, n_dup)
countloop = countloop + 1
# Sort by total cost and compute weighted mean
arfitsort = np.sort(total_cost)
arindex = np.argsort(total_cost)
xold = xmean
xmean = np.dot(arx[:, arindex[0:mu]], weights)
xshow = xmean - xold
# Cumulation
ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(
invsqrtC, xshow) / sigma
hsig = (np.linalg.norm(ps) / np.sqrt(1 - (1 - cs)**
(2 * countloop)) / chiN <
(1.4 + 2 / (N + 1))) * 1
pc = (1 - cc) * pc + hsig * np.sqrt(cc *
(2 - cc) * mueff) * xshow / sigma
# Adapting covariance matrix C
artmp = (1 / sigma) * (
arx[:, arindex[0:mu]] - np.tile(xold, [mu, 1]).T)
C = (1 - c1 - cmu) * C + c1 * (
np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + cmu * np.dot(
artmp, np.dot(np.diag(weights), artmp.T))
# Adapting step size
sigma = sigma * np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))
sigma = min(sigma, sigmaS)
        # Refresh B and D from the eigendecomposition of the covariance matrix C
if 1 > 1 / (c1 + cmu) / N / 10:
C = np.triu(C, k=1) + np.triu(C).T
D, B = np.linalg.eigh(C)
D = D.real
B = B.real
D = np.sqrt(D)
invsqrtC = np.dot(B, np.dot(np.diag(D**(-1)), B.T))
# Monitoring the evolution status
ps_norm = np.linalg.norm(ps)
print('******** Generation: ' + str(countloop) + ' ********')
print('Norm of P-sigma: ', ps_norm)
print('The mean of total cost: ', np.mean(arfitsort[0:mu]))
print('Sigma: ', sigma)
xmin[0:N, countloop - 1] = arx[:, arindex[0]]
xmin[N, countloop - 1] = fc_cost[arindex[0]]
xmin[N + 1, countloop - 1] = fcd_cost[arindex[0]]
xmin[N + 2, countloop - 1] = np.min(total_cost)
print('Best total cost: ', np.min(total_cost))
print('FC correlation cost: ', fc_cost[arindex[0]])
print('FCD KS statistics cost: ', fcd_cost[arindex[0]])
elapsed_time = time.time() - start_time
print('Elapsed time for this evolution is : ', elapsed_time)
print('******************************************')
# break
if arfitsort[0] < stoppoint and ps_norm < 11:
stop_count = stop_count + 1
if stop_count >= 5 or sigma < 0.001:
break
save_name = [output_path] + ['random_seed_', str(random_seed), '.csv']
np.savetxt(''.join(save_name), xmin, delimiter=',')
if __name__ == "__main__":
warnings.filterwarnings("ignore", category=RuntimeWarning)
for i in range(1, 11):
CBIG_mfm_optimization_desikan_main(random_seed=i)
| 37.723404
| 79
| 0.548111
|
def0b1bbc6f2b3455f09cdd9a1cf484d0bfe1b99
| 9,270
|
py
|
Python
|
tests/systems/maddpg_system_test.py
|
1998x-stack/Mava
|
2c8f7f59f235340886e92045b6730cf5a542a496
|
[
"Apache-2.0"
] | 1
|
2022-03-19T12:51:16.000Z
|
2022-03-19T12:51:16.000Z
|
tests/systems/maddpg_system_test.py
|
1998x-stack/Mava
|
2c8f7f59f235340886e92045b6730cf5a542a496
|
[
"Apache-2.0"
] | null | null | null |
tests/systems/maddpg_system_test.py
|
1998x-stack/Mava
|
2c8f7f59f235340886e92045b6730cf5a542a496
|
[
"Apache-2.0"
] | null | null | null |
# python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MADDPG."""
import functools
import launchpad as lp
import sonnet as snt
import mava
from mava.components.tf import architectures
from mava.components.tf.architectures.utils import fully_connected_network_spec
from mava.systems.tf import maddpg
from mava.utils import lp_utils
from mava.utils.enums import ArchitectureType
from mava.utils.environments import debugging_utils
class TestMADDPG:
"""Simple integration/smoke test for MADDPG."""
def test_maddpg_on_debugging_env(self) -> None:
"""Test feedforward maddpg."""
# environment
environment_factory = functools.partial(
debugging_utils.make_environment,
env_name="simple_spread",
action_space="continuous",
)
# networks
network_factory = lp_utils.partial_kwargs(
maddpg.make_default_networks, policy_networks_layer_sizes=(64, 64)
)
# system
system = maddpg.MADDPG(
environment_factory=environment_factory,
network_factory=network_factory,
num_executors=1,
batch_size=32,
min_replay_size=32,
max_replay_size=1000,
policy_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
critic_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
checkpoint=False,
)
program = system.build()
(trainer_node,) = program.groups["trainer"]
trainer_node.disable_run()
# Launch gpu config - don't use gpu
local_resources = lp_utils.to_device(
program_nodes=program.groups.keys(), nodes_on_gpu=[]
)
lp.launch(
program,
launch_type="test_mt",
local_resources=local_resources,
)
trainer: mava.Trainer = trainer_node.create_handle().dereference()
for _ in range(2):
trainer.step()
def test_recurrent_maddpg_on_debugging_env(self) -> None:
"""Test recurrent maddpg."""
# environment
environment_factory = functools.partial(
debugging_utils.make_environment,
env_name="simple_spread",
action_space="continuous",
)
# networks
network_factory = lp_utils.partial_kwargs(
maddpg.make_default_networks,
architecture_type=ArchitectureType.recurrent,
policy_networks_layer_sizes=(32, 32),
)
# system
system = maddpg.MADDPG(
environment_factory=environment_factory,
network_factory=network_factory,
num_executors=1,
batch_size=16,
min_replay_size=16,
max_replay_size=1000,
policy_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
critic_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
checkpoint=False,
trainer_fn=maddpg.training.MADDPGDecentralisedRecurrentTrainer,
executor_fn=maddpg.execution.MADDPGRecurrentExecutor,
sequence_length=4,
period=4,
bootstrap_n=2,
)
program = system.build()
(trainer_node,) = program.groups["trainer"]
trainer_node.disable_run()
# Launch gpu config - don't use gpu
local_resources = lp_utils.to_device(
program_nodes=program.groups.keys(), nodes_on_gpu=[]
)
lp.launch(
program,
launch_type="test_mt",
local_resources=local_resources,
)
trainer: mava.Trainer = trainer_node.create_handle().dereference()
for _ in range(2):
trainer.step()
def test_centralised_maddpg_on_debugging_env(self) -> None:
"""Test centralised maddpg."""
# environment
environment_factory = functools.partial(
debugging_utils.make_environment,
env_name="simple_spread",
action_space="continuous",
)
# networks
network_factory = lp_utils.partial_kwargs(
maddpg.make_default_networks,
policy_networks_layer_sizes=(32, 32),
)
# system
system = maddpg.MADDPG(
environment_factory=environment_factory,
network_factory=network_factory,
num_executors=1,
batch_size=16,
min_replay_size=16,
max_replay_size=1000,
policy_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
critic_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
checkpoint=False,
architecture=architectures.CentralisedQValueCritic,
trainer_fn=maddpg.MADDPGCentralisedTrainer,
shared_weights=False,
)
program = system.build()
(trainer_node,) = program.groups["trainer"]
trainer_node.disable_run()
# Launch gpu config - don't use gpu
local_resources = lp_utils.to_device(
program_nodes=program.groups.keys(), nodes_on_gpu=[]
)
lp.launch(
program,
launch_type="test_mt",
local_resources=local_resources,
)
trainer: mava.Trainer = trainer_node.create_handle().dereference()
for _ in range(2):
trainer.step()
def test_networked_maddpg_on_debugging_env(self) -> None:
"""Test networked maddpg."""
# environment
environment_factory = functools.partial(
debugging_utils.make_environment,
env_name="simple_spread",
action_space="continuous",
)
# networks
network_factory = lp_utils.partial_kwargs(
maddpg.make_default_networks,
policy_networks_layer_sizes=(32, 32),
)
# system
system = maddpg.MADDPG(
environment_factory=environment_factory,
network_factory=network_factory,
num_executors=1,
batch_size=16,
min_replay_size=16,
max_replay_size=1000,
policy_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
critic_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
checkpoint=False,
trainer_fn=maddpg.MADDPGNetworkedTrainer,
architecture=architectures.NetworkedQValueCritic,
connection_spec=fully_connected_network_spec,
shared_weights=False,
)
program = system.build()
(trainer_node,) = program.groups["trainer"]
trainer_node.disable_run()
# Launch gpu config - don't use gpu
local_resources = lp_utils.to_device(
program_nodes=program.groups.keys(), nodes_on_gpu=[]
)
lp.launch(
program,
launch_type="test_mt",
local_resources=local_resources,
)
trainer: mava.Trainer = trainer_node.create_handle().dereference()
for _ in range(2):
trainer.step()
def test_state_based_maddpg_on_debugging_env(self) -> None:
"""Test state based maddpg."""
# environment
environment_factory = functools.partial(
debugging_utils.make_environment,
env_name="simple_spread",
action_space="continuous",
return_state_info=True,
)
# networks
network_factory = lp_utils.partial_kwargs(
maddpg.make_default_networks,
policy_networks_layer_sizes=(32, 32),
)
# system
system = maddpg.MADDPG(
environment_factory=environment_factory,
network_factory=network_factory,
num_executors=1,
batch_size=16,
min_replay_size=16,
max_replay_size=1000,
policy_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
critic_optimizer=snt.optimizers.Adam(learning_rate=1e-4),
checkpoint=False,
trainer_fn=maddpg.MADDPGStateBasedTrainer,
architecture=architectures.StateBasedQValueCritic,
shared_weights=False,
)
program = system.build()
(trainer_node,) = program.groups["trainer"]
trainer_node.disable_run()
# Launch gpu config - don't use gpu
local_resources = lp_utils.to_device(
program_nodes=program.groups.keys(), nodes_on_gpu=[]
)
lp.launch(
program,
launch_type="test_mt",
local_resources=local_resources,
)
trainer: mava.Trainer = trainer_node.create_handle().dereference()
for _ in range(2):
trainer.step()
| 31.85567
| 79
| 0.620928
|
01d45abc54fef817c8d1bd05fa6b9630fe8ca75b
| 18,485
|
py
|
Python
|
plugins/modules/oci_database_autonomous_exadata_infrastructure_facts.py
|
sohwaje/oci-ansible-collection
|
9e6b8cf55e596a96560710a457a7df05886fc59c
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_database_autonomous_exadata_infrastructure_facts.py
|
sohwaje/oci-ansible-collection
|
9e6b8cf55e596a96560710a457a7df05886fc59c
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_database_autonomous_exadata_infrastructure_facts.py
|
sohwaje/oci-ansible-collection
|
9e6b8cf55e596a96560710a457a7df05886fc59c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_autonomous_exadata_infrastructure_facts
short_description: Fetches details about one or multiple AutonomousExadataInfrastructure resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple AutonomousExadataInfrastructure resources in Oracle Cloud Infrastructure
- Gets a list of the Autonomous Exadata Infrastructures in the specified compartment.
- If I(autonomous_exadata_infrastructure_id) is specified, the details of a single AutonomousExadataInfrastructure will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
autonomous_exadata_infrastructure_id:
description:
- The Autonomous Exadata Infrastructure L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to get a specific autonomous_exadata_infrastructure.
type: str
aliases: ["id"]
compartment_id:
description:
- The compartment L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to list multiple autonomous_exadata_infrastructures.
type: str
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME
is ascending. The DISPLAYNAME sort order is case sensitive.
- " **Note:** If you do not include the availability domain filter, the resources are grouped by availability domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`).
type: str
choices:
- "ASC"
- "DESC"
lifecycle_state:
description:
- A filter to return only resources that match the given lifecycle state exactly.
type: str
choices:
- "PROVISIONING"
- "AVAILABLE"
- "UPDATING"
- "TERMINATING"
- "TERMINATED"
- "FAILED"
- "MAINTENANCE_IN_PROGRESS"
availability_domain:
description:
- A filter to return only resources that match the given availability domain exactly.
type: str
display_name:
description:
- A filter to return only resources that match the entire display name given. The match is not case sensitive.
type: str
aliases: ["name"]
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List autonomous_exadata_infrastructures
oci_database_autonomous_exadata_infrastructure_facts:
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
- name: Get a specific autonomous_exadata_infrastructure
oci_database_autonomous_exadata_infrastructure_facts:
autonomous_exadata_infrastructure_id: "ocid1.autonomousexadatainfrastructure.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
autonomous_exadata_infrastructures:
description:
- List of AutonomousExadataInfrastructure resources
returned: on success
type: complex
contains:
id:
description:
- The OCID of the Autonomous Exadata Infrastructure.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The OCID of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The user-friendly name for the Autonomous Exadata Infrastructure.
returned: on success
type: str
sample: display_name_example
availability_domain:
description:
- The name of the availability domain that the Autonomous Exadata Infrastructure is located in.
returned: on success
type: str
sample: Uocm:PHX-AD-1
subnet_id:
description:
- The OCID of the subnet the Autonomous Exadata Infrastructure is associated with.
- "**Subnet Restrictions:**
- For Autonomous Databases with Autonomous Exadata Infrastructure, do not use a subnet that overlaps with 192.168.128.0/20"
- These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and backup subnet.
returned: on success
type: str
sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
nsg_ids:
description:
- "A list of the L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network security groups (NSGs) that this
resource belongs to. Setting this to an empty array after the list is created removes the resource from all NSGs. For more information about
NSGs, see L(Security Rules,https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- Autonomous Databases with private access require at least 1 Network Security Group (NSG). The nsgIds array cannot be empty."
returned: on success
type: list
sample: []
shape:
description:
- The shape of the Autonomous Exadata Infrastructure. The shape determines resources to allocate to the Autonomous Exadata Infrastructure (CPU
cores, memory and storage).
returned: on success
type: str
sample: shape_example
hostname:
description:
- The host name for the Autonomous Exadata Infrastructure node.
returned: on success
type: str
sample: hostname_example
domain:
description:
- The domain name for the Autonomous Exadata Infrastructure.
returned: on success
type: str
sample: domain_example
lifecycle_state:
description:
- The current lifecycle state of the Autonomous Exadata Infrastructure.
returned: on success
type: str
sample: PROVISIONING
lifecycle_details:
description:
- Additional information about the current lifecycle state of the Autonomous Exadata Infrastructure.
returned: on success
type: str
sample: lifecycle_details_example
license_model:
description:
- The Oracle license model that applies to all databases in the Autonomous Exadata Infrastructure. The default is BRING_YOUR_OWN_LICENSE.
returned: on success
type: str
sample: LICENSE_INCLUDED
time_created:
description:
- The date and time the Autonomous Exadata Infrastructure was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
maintenance_window:
description:
- ""
returned: on success
type: complex
contains:
preference:
description:
- The maintenance window scheduling preference.
returned: on success
type: str
sample: NO_PREFERENCE
months:
description:
- Months during the year when maintenance should be performed.
returned: on success
type: complex
contains:
name:
description:
- Name of the month of the year.
returned: on success
type: str
sample: JANUARY
weeks_of_month:
description:
- Weeks during the month when maintenance should be performed. Weeks start on the 1st, 8th, 15th, and 22nd days of the month, and have a
duration of 7 days. Weeks start and end based on calendar dates, not days of the week.
For example, to allow maintenance during the 2nd week of the month (from the 8th day to the 14th day of the month), use the value 2.
Maintenance cannot be scheduled for the fifth week of months that contain more than 28 days.
Note that this parameter works in conjunction with the daysOfWeek and hoursOfDay parameters to allow you to specify specific days of
the week and hours that maintenance will be performed.
returned: on success
type: list
sample: []
days_of_week:
description:
- Days during the week when maintenance should be performed.
returned: on success
type: complex
contains:
name:
description:
- Name of the day of the week.
returned: on success
type: str
sample: MONDAY
hours_of_day:
description:
- "The window of hours during the day when maintenance should be performed. The window is a 4 hour slot. Valid values are
- 0 - represents time slot 0:00 - 3:59 UTC - 4 - represents time slot 4:00 - 7:59 UTC - 8 - represents time slot 8:00 - 11:59 UTC - 12
- represents time slot 12:00 - 15:59 UTC - 16 - represents time slot 16:00 - 19:59 UTC - 20 - represents time slot 20:00 - 23:59
UTC"
returned: on success
type: list
sample: []
lead_time_in_weeks:
description:
- Lead time window allows user to set a lead time to prepare for a down time. The lead time is in weeks and valid value is between 1 to
4.
returned: on success
type: int
sample: 56
last_maintenance_run_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the last maintenance run.
returned: on success
type: str
sample: "ocid1.lastmaintenancerun.oc1..xxxxxxEXAMPLExxxxxx"
next_maintenance_run_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the next maintenance run.
returned: on success
type: str
sample: "ocid1.nextmaintenancerun.oc1..xxxxxxEXAMPLExxxxxx"
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
scan_dns_name:
description:
- The FQDN of the DNS record for the SCAN IP addresses that are associated with the Autonomous Exadata Infrastructure.
returned: on success
type: str
sample: scan_dns_name_example
zone_id:
description:
- The OCID of the zone the Autonomous Exadata Infrastructure is associated with.
returned: on success
type: str
sample: "ocid1.zone.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"availability_domain": "Uocm:PHX-AD-1",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"nsg_ids": [],
"shape": "shape_example",
"hostname": "hostname_example",
"domain": "domain_example",
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example",
"license_model": "LICENSE_INCLUDED",
"time_created": "2013-10-20T19:20:30+01:00",
"maintenance_window": {
"preference": "NO_PREFERENCE",
"months": [{
"name": "JANUARY"
}],
"weeks_of_month": [],
"days_of_week": [{
"name": "MONDAY"
}],
"hours_of_day": [],
"lead_time_in_weeks": 56
},
"last_maintenance_run_id": "ocid1.lastmaintenancerun.oc1..xxxxxxEXAMPLExxxxxx",
"next_maintenance_run_id": "ocid1.nextmaintenancerun.oc1..xxxxxxEXAMPLExxxxxx",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"scan_dns_name": "scan_dns_name_example",
"zone_id": "ocid1.zone.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutonomousExadataInfrastructureFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"autonomous_exadata_infrastructure_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_autonomous_exadata_infrastructure,
autonomous_exadata_infrastructure_id=self.module.params.get(
"autonomous_exadata_infrastructure_id"
),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
"lifecycle_state",
"availability_domain",
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_autonomous_exadata_infrastructures,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
AutonomousExadataInfrastructureFactsHelperCustom = get_custom_class(
"AutonomousExadataInfrastructureFactsHelperCustom"
)
class ResourceFactsHelper(
AutonomousExadataInfrastructureFactsHelperCustom,
AutonomousExadataInfrastructureFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
autonomous_exadata_infrastructure_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
lifecycle_state=dict(
type="str",
choices=[
"PROVISIONING",
"AVAILABLE",
"UPDATING",
"TERMINATING",
"TERMINATED",
"FAILED",
"MAINTENANCE_IN_PROGRESS",
],
),
availability_domain=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="autonomous_exadata_infrastructure",
service_client_class=DatabaseClient,
namespace="database",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(autonomous_exadata_infrastructures=result)
if __name__ == "__main__":
main()
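
A side note on the list_resources implementation above: it forwards to the SDK only those optional module parameters the user actually set. The standalone sketch below (hypothetical parameter names, not part of the collection) isolates that filtering idiom; it assumes nothing beyond the standard library.

def build_optional_kwargs(params, optional_names):
    """Return only the optional params that were explicitly set (not None)."""
    return {
        name: params[name]
        for name in optional_names
        if params.get(name) is not None
    }

# Example: only sort_by was supplied, so only sort_by is forwarded to the list call.
example_params = {"sort_by": "TIMECREATED", "sort_order": None, "display_name": None}
print(build_optional_kwargs(example_params, ["sort_by", "sort_order", "display_name"]))
# {'sort_by': 'TIMECREATED'}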
avg_line_length: 41.632883 | max_line_length: 160 | alphanum_fraction: 0.600595

hexsha: 9831af9982bfdd3960142367ee3bd58baa5b1838 | size: 3,698 | ext: py | lang: Python
max_stars_repo_path: app/services/pull_request_service.py | max_stars_repo_name: paullegranddc/gello | max_stars_repo_head_hexsha: 9f8ce0e67e5da66cb8d26d01edc9a132bdfe8738 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: app/services/pull_request_service.py | max_issues_repo_name: paullegranddc/gello | max_issues_repo_head_hexsha: 9f8ce0e67e5da66cb8d26d01edc9a132bdfe8738 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-02-24T01:38:37.000Z | max_issues_repo_issues_event_max_datetime: 2021-02-24T01:38:37.000Z
max_forks_repo_path: app/services/pull_request_service.py | max_forks_repo_name: isabella232/gello | max_forks_repo_head_hexsha: 3c7c6c72187095e3675b3ca15dcab8a2adaf44c0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""pull_request_service.py
Service-helpers for creating and mutating pull_request data.
"""
from . import CRUDService
from .. import db
from ..models import PullRequest
class PullRequestService(CRUDService):
"""CRUD persistent storage service.
A class with the single responsibility of creating/mutating PullRequest
data.
"""
def create(self, name, url, github_pull_request_id, repo_id,
trello_board_id=None, trello_card_id=None, trello_card_url=None,
trello_list_id=None, jira_issue_key=None, jira_project_key=None,
jira_parent_issue_key=None):
"""Creates and persists a new pull_request record to the database.
Args:
name (str): The name of the GitHub pull request.
url (str): The GitHub url of the pull request.
            github_pull_request_id (int): The id of the GitHub pull request.
repo_id (int): The id of the GitHub repo corresponding to the
pull request.
trello_board_id (str): The id of the board the card corresponding
to the pull request was created on.
trello_card_id (str): The id of the card created corresponding to
                the issue.
trello_card_url (str): The url for the created card corresponding
to the pull request.
trello_list_id (str): The id for the list the card corresponding
to the issue was created on.
jira_issue_key (str): The key of the created jira issue
corresponding to the pull request
jira_project_key (str): The key of the project the jira issue
corresponding to the pull request was created under
jira_parent_issue_key (str): The key of the issue the
sub-issue corresponding to the pull request was created under
(if a sub-issue was indeed created)
Returns:
None
"""
pull_request = PullRequest(
name=name,
url=url,
github_pull_request_id=github_pull_request_id,
repo_id=repo_id,
trello_board_id=trello_board_id,
trello_card_id=trello_card_id,
trello_card_url=trello_card_url,
trello_list_id=trello_list_id,
jira_issue_key=jira_issue_key,
jira_project_key=jira_project_key,
jira_parent_issue_key=jira_parent_issue_key
)
db.session.add(pull_request)
# Persists the pull_request
db.session.commit()
def update(self, github_pull_request_id, name):
"""Updates a persisted pull_request.
Args:
github_pull_request_id (int): The id of the GitHub pull request.
name (str): The updated name of the GitHub pull request.
Returns:
None
"""
for pr in PullRequest.query.filter_by(
github_pull_request_id=github_pull_request_id
):
pr.name = name
db.session.commit()
def delete(self, github_pull_request_id):
"""Deletes an old, persisted pull_request.
Args:
github_pull_request_id (int): The id of the GitHub pull request.
Returns:
None
"""
PullRequest.query.filter_by(
github_pull_request_id=github_pull_request_id).delete()
db.session.commit()
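
A minimal usage sketch of the service above, assuming an initialised Flask-SQLAlchemy session; the ids, names, and URL are made-up illustrative values, not taken from Gello's data.

# Hypothetical example values; requires an application context with `db` configured.
service = PullRequestService()
service.create(
    name="Fix flaky webhook test",
    url="https://github.com/example/repo/pull/1",
    github_pull_request_id=1,
    repo_id=42,
    trello_board_id="board_id_example",
)
service.update(github_pull_request_id=1, name="Fix flaky webhook test (rebased)")
service.delete(github_pull_request_id=1)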
avg_line_length: 33.618182 | max_line_length: 79 | alphanum_fraction: 0.631422

hexsha: 92264f5d58f278f6a156231ecfb8df8b6ae3b856 | size: 837 | ext: py | lang: Python
max_stars_repo_path: pictures/migrations/0001_initial.py | max_stars_repo_name: amiinegal/Gallery-app | max_stars_repo_head_hexsha: 3619f1717e8f813e151f7a178f0789f06bd52c02 | max_stars_repo_licenses: ["Unlicense", "MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pictures/migrations/0001_initial.py | max_issues_repo_name: amiinegal/Gallery-app | max_issues_repo_head_hexsha: 3619f1717e8f813e151f7a178f0789f06bd52c02 | max_issues_repo_licenses: ["Unlicense", "MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pictures/migrations/0001_initial.py | max_forks_repo_name: amiinegal/Gallery-app | max_forks_repo_head_hexsha: 3619f1717e8f813e151f7a178f0789f06bd52c02 | max_forks_repo_licenses: ["Unlicense", "MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-12 03:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location_name', models.CharField(max_length=25)),
],
),
migrations.CreateModel(
name='tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
]
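
For orientation, the following is a plausible models.py sketch that would generate the migration above; it is reconstructed from the migration's fields and is not taken from the app's actual source.

from django.db import models

class Location(models.Model):
    location_name = models.CharField(max_length=25)

class tags(models.Model):  # lowercase class name kept to match the migration
    name = models.CharField(max_length=30)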
avg_line_length: 27 | max_line_length: 114 | alphanum_fraction: 0.569892

hexsha: a06fe27bbe92847c3f7d1e3c65717b6234100e66 | size: 89,073 | ext: py | lang: Python
max_stars_repo_path: ac_dc/stopwords.py | max_stars_repo_name: tttorrent/data_tooling | max_stars_repo_head_hexsha: 6540bcb3cd2dbaa3b20c8a1bd84e4cbde7ebe50b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ac_dc/stopwords.py | max_issues_repo_name: tttorrent/data_tooling | max_issues_repo_head_hexsha: 6540bcb3cd2dbaa3b20c8a1bd84e4cbde7ebe50b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ac_dc/stopwords.py | max_forks_repo_name: tttorrent/data_tooling | max_forks_repo_head_hexsha: 6540bcb3cd2dbaa3b20c8a1bd84e4cbde7ebe50b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# From https://github.com/6/stopwords-json
# From https://github.com/stopwords-iso/stopwords-iso for Urdu and Vietnamese
stopwords = {
"af": [
"'n",
"aan",
"af",
"al",
"as",
"baie",
"by",
"daar",
"dag",
"dat",
"die",
"dit",
"een",
"ek",
"en",
"gaan",
"gesê",
"haar",
"het",
"hom",
"hulle",
"hy",
"in",
"is",
"jou",
"jy",
"kan",
"kom",
"ma",
"maar",
"met",
"my",
"na",
"nie",
"om",
"ons",
"op",
"saam",
"sal",
"se",
"sien",
"so",
"sy",
"te",
"toe",
"uit",
"van",
"vir",
"was",
"wat",
"ʼn",
],
"ar": [
"،",
"أ",
"ا",
"اثر",
"اجل",
"احد",
"اخرى",
"اذا",
"اربعة",
"اطار",
"اعادة",
"اعلنت",
"اف",
"اكثر",
"اكد",
"الا",
"الاخيرة",
"الان",
"الاول",
"الاولى",
"التى",
"التي",
"الثاني",
"الثانية",
"الذاتي",
"الذى",
"الذي",
"الذين",
"السابق",
"الف",
"الماضي",
"المقبل",
"الوقت",
"الى",
"اليوم",
"اما",
"امام",
"امس",
"ان",
"انه",
"انها",
"او",
"اول",
"اي",
"ايار",
"ايام",
"ايضا",
"ب",
"باسم",
"بان",
"برس",
"بسبب",
"بشكل",
"بعد",
"بعض",
"بن",
"به",
"بها",
"بين",
"تم",
"ثلاثة",
"ثم",
"جميع",
"حاليا",
"حتى",
"حوالى",
"حول",
"حيث",
"حين",
"خلال",
"دون",
"ذلك",
"زيارة",
"سنة",
"سنوات",
"شخصا",
"صباح",
"صفر",
"ضد",
"ضمن",
"عام",
"عاما",
"عدة",
"عدد",
"عدم",
"عشر",
"عشرة",
"على",
"عليه",
"عليها",
"عن",
"عند",
"عندما",
"غدا",
"غير",
"ـ",
"ف",
"فان",
"فى",
"في",
"فيه",
"فيها",
"قال",
"قبل",
"قد",
"قوة",
"كان",
"كانت",
"كل",
"كلم",
"كما",
"لا",
"لدى",
"لقاء",
"لكن",
"للامم",
"لم",
"لن",
"له",
"لها",
"لوكالة",
"ما",
"مايو",
"مساء",
"مع",
"مقابل",
"مليار",
"مليون",
"من",
"منذ",
"منها",
"نحو",
"نفسه",
"نهاية",
"هذا",
"هذه",
"هناك",
"هو",
"هي",
"و",
"و6",
"واحد",
"واضاف",
"واضافت",
"واكد",
"وان",
"واوضح",
"وفي",
"وقال",
"وقالت",
"وقد",
"وقف",
"وكان",
"وكانت",
"ولا",
"ولم",
"ومن",
"وهو",
"وهي",
"يكون",
"يمكن",
"يوم",
],
"bn": [
"অনেক",
"অন্য",
"অবশ্য",
"আগে",
"আছে",
"আজ",
"আবার",
"আমরা",
"আমাদের",
"আর",
"ই",
"উত্তর",
"উপর",
"উপরে",
"এ",
"এই",
"এক্",
"এখন",
"এত",
"এব",
"এমন",
"এমনি",
"এর",
"এস",
"এসে",
"ও",
"ওই",
"কমনে",
"করা",
"করে",
"কাছে",
"কাজ",
"কাজে",
"কারণ",
"কি",
"কিছু",
"কে",
"কেউ",
"কেখা",
"কেন",
"কোটি",
"কোনো",
"কয়েক",
"খুব",
"গিয়ে",
"গেল",
"চার",
"চালু",
"চেষ্টা",
"ছিল",
"জানা",
"জ্নজন",
"টি",
"তখন",
"তবে",
"তা",
"তাই",
"তো",
"থাকা",
"থেকে",
"দিন",
"দু",
"দুই",
"দেওয়া",
"ধামার",
"নতুন",
"না",
"নাগাদ",
"নিয়ে",
"নেওয়া",
"নয়",
"পর",
"পরে",
"পাচ",
"পি",
"পেয়্র্",
"প্রতি",
"প্রথম",
"প্রযন্ত",
"প্রাথমিক",
"প্রায়",
"বক্তব্য",
"বন",
"বলা",
"বলে",
"বলেন",
"বহু",
"বা",
"বি",
"বিভিন্ন",
"বেশ",
"বেশি",
"মতো",
"মধ্যে",
"মনে",
"যখন",
"যদি",
"যা",
"যাওয়া",
"যে",
"র",
"রকম",
"লক্ষ",
"শুধু",
"শুরু",
"সঙ্গে",
"সব",
"সহ",
"সাধারণ",
"সামনে",
"সি",
"সে",
"সেই",
"হতে",
"হাজার",
"হয়",
],
"ca": [
"a",
"abans",
"ací",
"ah",
"així",
"això",
"al",
"aleshores",
"algun",
"alguna",
"algunes",
"alguns",
"alhora",
"allà",
"allí",
"allò",
"als",
"altra",
"altre",
"altres",
"amb",
"ambdues",
"ambdós",
"apa",
"aquell",
"aquella",
"aquelles",
"aquells",
"aquest",
"aquesta",
"aquestes",
"aquests",
"aquí",
"baix",
"cada",
"cadascuna",
"cadascunes",
"cadascuns",
"cadascú",
"com",
"contra",
"d'un",
"d'una",
"d'unes",
"d'uns",
"dalt",
"de",
"del",
"dels",
"des",
"després",
"dins",
"dintre",
"donat",
"doncs",
"durant",
"e",
"eh",
"el",
"els",
"em",
"en",
"encara",
"ens",
"entre",
"eren",
"es",
"esta",
"estaven",
"esteu",
"està",
"estàvem",
"estàveu",
"et",
"etc",
"ets",
"fins",
"fora",
"gairebé",
"ha",
"han",
"has",
"havia",
"he",
"hem",
"heu",
"hi",
"ho",
"i",
"igual",
"iguals",
"ja",
"l'hi",
"la",
"les",
"li",
"li'n",
"llavors",
"m'he",
"ma",
"mal",
"malgrat",
"mateix",
"mateixa",
"mateixes",
"mateixos",
"me",
"mentre",
"meu",
"meus",
"meva",
"meves",
"molt",
"molta",
"moltes",
"molts",
"mon",
"mons",
"més",
"n'he",
"n'hi",
"ne",
"ni",
"no",
"nogensmenys",
"només",
"nosaltres",
"nostra",
"nostre",
"nostres",
"o",
"oh",
"oi",
"on",
"pas",
"pel",
"pels",
"per",
"perquè",
"però",
"poc",
"poca",
"pocs",
"poques",
"potser",
"propi",
"qual",
"quals",
"quan",
"quant",
"que",
"quelcom",
"qui",
"quin",
"quina",
"quines",
"quins",
"què",
"s'ha",
"s'han",
"sa",
"semblant",
"semblants",
"ses",
"seu",
"seus",
"seva",
"seves",
"si",
"sobre",
"sobretot",
"solament",
"sols",
"son",
"sons",
"sota",
"sou",
"sóc",
"són",
"t'ha",
"t'han",
"t'he",
"ta",
"tal",
"també",
"tampoc",
"tan",
"tant",
"tanta",
"tantes",
"teu",
"teus",
"teva",
"teves",
"ton",
"tons",
"tot",
"tota",
"totes",
"tots",
"un",
"una",
"unes",
"uns",
"us",
"va",
"vaig",
"vam",
"van",
"vas",
"veu",
"vosaltres",
"vostra",
"vostre",
"vostres",
"érem",
"éreu",
"és",
],
"en": [
"a",
"a's",
"able",
"about",
"above",
"according",
"accordingly",
"across",
"actually",
"after",
"afterwards",
"again",
"against",
"ain't",
"all",
"allow",
"allows",
"almost",
"alone",
"along",
"already",
"also",
"although",
"always",
"am",
"among",
"amongst",
"an",
"and",
"another",
"any",
"anybody",
"anyhow",
"anyone",
"anything",
"anyway",
"anyways",
"anywhere",
"apart",
"appear",
"appreciate",
"appropriate",
"are",
"aren't",
"around",
"as",
"aside",
"ask",
"asking",
"associated",
"at",
"available",
"away",
"awfully",
"b",
"be",
"became",
"because",
"become",
"becomes",
"becoming",
"been",
"before",
"beforehand",
"behind",
"being",
"believe",
"below",
"beside",
"besides",
"best",
"better",
"between",
"beyond",
"both",
"brief",
"but",
"by",
"c",
"c'mon",
"c's",
"came",
"can",
"can't",
"cannot",
"cant",
"cause",
"causes",
"certain",
"certainly",
"changes",
"clearly",
"co",
"com",
"come",
"comes",
"concerning",
"consequently",
"consider",
"considering",
"contain",
"containing",
"contains",
"corresponding",
"could",
"couldn't",
"course",
"currently",
"d",
"definitely",
"described",
"despite",
"did",
"didn't",
"different",
"do",
"does",
"doesn't",
"doing",
"don't",
"done",
"down",
"downwards",
"during",
"e",
"each",
"edu",
"eg",
"eight",
"either",
"else",
"elsewhere",
"enough",
"entirely",
"especially",
"et",
"etc",
"even",
"ever",
"every",
"everybody",
"everyone",
"everything",
"everywhere",
"ex",
"exactly",
"example",
"except",
"f",
"far",
"few",
"fifth",
"first",
"five",
"followed",
"following",
"follows",
"for",
"former",
"formerly",
"forth",
"four",
"from",
"further",
"furthermore",
"g",
"get",
"gets",
"getting",
"given",
"gives",
"go",
"goes",
"going",
"gone",
"got",
"gotten",
"greetings",
"h",
"had",
"hadn't",
"happens",
"hardly",
"has",
"hasn't",
"have",
"haven't",
"having",
"he",
"he's",
"hello",
"help",
"hence",
"her",
"here",
"here's",
"hereafter",
"hereby",
"herein",
"hereupon",
"hers",
"herself",
"hi",
"him",
"himself",
"his",
"hither",
"hopefully",
"how",
"howbeit",
"however",
"i",
"i'd",
"i'll",
"i'm",
"i've",
"ie",
"if",
"ignored",
"immediate",
"in",
"inasmuch",
"inc",
"indeed",
"indicate",
"indicated",
"indicates",
"inner",
"insofar",
"instead",
"into",
"inward",
"is",
"isn't",
"it",
"it'd",
"it'll",
"it's",
"its",
"itself",
"j",
"just",
"k",
"keep",
"keeps",
"kept",
"know",
"known",
"knows",
"l",
"last",
"lately",
"later",
"latter",
"latterly",
"least",
"less",
"lest",
"let",
"let's",
"like",
"liked",
"likely",
"little",
"look",
"looking",
"looks",
"ltd",
"m",
"mainly",
"many",
"may",
"maybe",
"me",
"mean",
"meanwhile",
"merely",
"might",
"more",
"moreover",
"most",
"mostly",
"much",
"must",
"my",
"myself",
"n",
"name",
"namely",
"nd",
"near",
"nearly",
"necessary",
"need",
"needs",
"neither",
"never",
"nevertheless",
"new",
"next",
"nine",
"no",
"nobody",
"non",
"none",
"noone",
"nor",
"normally",
"not",
"nothing",
"novel",
"now",
"nowhere",
"o",
"obviously",
"of",
"off",
"often",
"oh",
"ok",
"okay",
"old",
"on",
"once",
"one",
"ones",
"only",
"onto",
"or",
"other",
"others",
"otherwise",
"ought",
"our",
"ours",
"ourselves",
"out",
"outside",
"over",
"overall",
"own",
"p",
"particular",
"particularly",
"per",
"perhaps",
"placed",
"please",
"plus",
"possible",
"presumably",
"probably",
"provides",
"q",
"que",
"quite",
"qv",
"r",
"rather",
"rd",
"re",
"really",
"reasonably",
"regarding",
"regardless",
"regards",
"relatively",
"respectively",
"right",
"s",
"said",
"same",
"saw",
"say",
"saying",
"says",
"second",
"secondly",
"see",
"seeing",
"seem",
"seemed",
"seeming",
"seems",
"seen",
"self",
"selves",
"sensible",
"sent",
"serious",
"seriously",
"seven",
"several",
"shall",
"she",
"should",
"shouldn't",
"since",
"six",
"so",
"some",
"somebody",
"somehow",
"someone",
"something",
"sometime",
"sometimes",
"somewhat",
"somewhere",
"soon",
"sorry",
"specified",
"specify",
"specifying",
"still",
"sub",
"such",
"sup",
"sure",
"t",
"t's",
"take",
"taken",
"tell",
"tends",
"th",
"than",
"thank",
"thanks",
"thanx",
"that",
"that's",
"thats",
"the",
"their",
"theirs",
"them",
"themselves",
"then",
"thence",
"there",
"there's",
"thereafter",
"thereby",
"therefore",
"therein",
"theres",
"thereupon",
"these",
"they",
"they'd",
"they'll",
"they're",
"they've",
"think",
"third",
"this",
"thorough",
"thoroughly",
"those",
"though",
"three",
"through",
"throughout",
"thru",
"thus",
"to",
"together",
"too",
"took",
"toward",
"towards",
"tried",
"tries",
"truly",
"try",
"trying",
"twice",
"two",
"u",
"un",
"under",
"unfortunately",
"unless",
"unlikely",
"until",
"unto",
"up",
"upon",
"us",
"use",
"used",
"useful",
"uses",
"using",
"usually",
"uucp",
"v",
"value",
"various",
"very",
"via",
"viz",
"vs",
"w",
"want",
"wants",
"was",
"wasn't",
"way",
"we",
"we'd",
"we'll",
"we're",
"we've",
"welcome",
"well",
"went",
"were",
"weren't",
"what",
"what's",
"whatever",
"when",
"whence",
"whenever",
"where",
"where's",
"whereafter",
"whereas",
"whereby",
"wherein",
"whereupon",
"wherever",
"whether",
"which",
"while",
"whither",
"who",
"who's",
"whoever",
"whole",
"whom",
"whose",
"why",
"will",
"willing",
"wish",
"with",
"within",
"without",
"won't",
"wonder",
"would",
"wouldn't",
"x",
"y",
"yes",
"yet",
"you",
"you'd",
"you'll",
"you're",
"you've",
"your",
"yours",
"yourself",
"yourselves",
"z",
"zero",
],
"es": [
"a",
"actualmente",
"acuerdo",
"adelante",
"ademas",
"además",
"adrede",
"afirmó",
"agregó",
"ahi",
"ahora",
"ahí",
"al",
"algo",
"alguna",
"algunas",
"alguno",
"algunos",
"algún",
"alli",
"allí",
"alrededor",
"ambos",
"ampleamos",
"antano",
"antaño",
"ante",
"anterior",
"antes",
"apenas",
"aproximadamente",
"aquel",
"aquella",
"aquellas",
"aquello",
"aquellos",
"aqui",
"aquél",
"aquélla",
"aquéllas",
"aquéllos",
"aquí",
"arriba",
"arribaabajo",
"aseguró",
"asi",
"así",
"atras",
"aun",
"aunque",
"ayer",
"añadió",
"aún",
"b",
"bajo",
"bastante",
"bien",
"breve",
"buen",
"buena",
"buenas",
"bueno",
"buenos",
"c",
"cada",
"casi",
"cerca",
"cierta",
"ciertas",
"cierto",
"ciertos",
"cinco",
"claro",
"comentó",
"como",
"con",
"conmigo",
"conocer",
"conseguimos",
"conseguir",
"considera",
"consideró",
"consigo",
"consigue",
"consiguen",
"consigues",
"contigo",
"contra",
"cosas",
"creo",
"cual",
"cuales",
"cualquier",
"cuando",
"cuanta",
"cuantas",
"cuanto",
"cuantos",
"cuatro",
"cuenta",
"cuál",
"cuáles",
"cuándo",
"cuánta",
"cuántas",
"cuánto",
"cuántos",
"cómo",
"d",
"da",
"dado",
"dan",
"dar",
"de",
"debajo",
"debe",
"deben",
"debido",
"decir",
"dejó",
"del",
"delante",
"demasiado",
"demás",
"dentro",
"deprisa",
"desde",
"despacio",
"despues",
"después",
"detras",
"detrás",
"dia",
"dias",
"dice",
"dicen",
"dicho",
"dieron",
"diferente",
"diferentes",
"dijeron",
"dijo",
"dio",
"donde",
"dos",
"durante",
"día",
"días",
"dónde",
"e",
"ejemplo",
"el",
"ella",
"ellas",
"ello",
"ellos",
"embargo",
"empleais",
"emplean",
"emplear",
"empleas",
"empleo",
"en",
"encima",
"encuentra",
"enfrente",
"enseguida",
"entonces",
"entre",
"era",
"eramos",
"eran",
"eras",
"eres",
"es",
"esa",
"esas",
"ese",
"eso",
"esos",
"esta",
"estaba",
"estaban",
"estado",
"estados",
"estais",
"estamos",
"estan",
"estar",
"estará",
"estas",
"este",
"esto",
"estos",
"estoy",
"estuvo",
"está",
"están",
"ex",
"excepto",
"existe",
"existen",
"explicó",
"expresó",
"f",
"fin",
"final",
"fue",
"fuera",
"fueron",
"fui",
"fuimos",
"g",
"general",
"gran",
"grandes",
"gueno",
"h",
"ha",
"haber",
"habia",
"habla",
"hablan",
"habrá",
"había",
"habían",
"hace",
"haceis",
"hacemos",
"hacen",
"hacer",
"hacerlo",
"haces",
"hacia",
"haciendo",
"hago",
"han",
"hasta",
"hay",
"haya",
"he",
"hecho",
"hemos",
"hicieron",
"hizo",
"horas",
"hoy",
"hubo",
"i",
"igual",
"incluso",
"indicó",
"informo",
"informó",
"intenta",
"intentais",
"intentamos",
"intentan",
"intentar",
"intentas",
"intento",
"ir",
"j",
"junto",
"k",
"l",
"la",
"lado",
"largo",
"las",
"le",
"lejos",
"les",
"llegó",
"lleva",
"llevar",
"lo",
"los",
"luego",
"lugar",
"m",
"mal",
"manera",
"manifestó",
"mas",
"mayor",
"me",
"mediante",
"medio",
"mejor",
"mencionó",
"menos",
"menudo",
"mi",
"mia",
"mias",
"mientras",
"mio",
"mios",
"mis",
"misma",
"mismas",
"mismo",
"mismos",
"modo",
"momento",
"mucha",
"muchas",
"mucho",
"muchos",
"muy",
"más",
"mí",
"mía",
"mías",
"mío",
"míos",
"n",
"nada",
"nadie",
"ni",
"ninguna",
"ningunas",
"ninguno",
"ningunos",
"ningún",
"no",
"nos",
"nosotras",
"nosotros",
"nuestra",
"nuestras",
"nuestro",
"nuestros",
"nueva",
"nuevas",
"nuevo",
"nuevos",
"nunca",
"o",
"ocho",
"os",
"otra",
"otras",
"otro",
"otros",
"p",
"pais",
"para",
"parece",
"parte",
"partir",
"pasada",
"pasado",
"paìs",
"peor",
"pero",
"pesar",
"poca",
"pocas",
"poco",
"pocos",
"podeis",
"podemos",
"poder",
"podria",
"podriais",
"podriamos",
"podrian",
"podrias",
"podrá",
"podrán",
"podría",
"podrían",
"poner",
"por",
"porque",
"posible",
"primer",
"primera",
"primero",
"primeros",
"principalmente",
"pronto",
"propia",
"propias",
"propio",
"propios",
"proximo",
"próximo",
"próximos",
"pudo",
"pueda",
"puede",
"pueden",
"puedo",
"pues",
"q",
"qeu",
"que",
"quedó",
"queremos",
"quien",
"quienes",
"quiere",
"quiza",
"quizas",
"quizá",
"quizás",
"quién",
"quiénes",
"qué",
"r",
"raras",
"realizado",
"realizar",
"realizó",
"repente",
"respecto",
"s",
"sabe",
"sabeis",
"sabemos",
"saben",
"saber",
"sabes",
"salvo",
"se",
"sea",
"sean",
"segun",
"segunda",
"segundo",
"según",
"seis",
"ser",
"sera",
"será",
"serán",
"sería",
"señaló",
"si",
"sido",
"siempre",
"siendo",
"siete",
"sigue",
"siguiente",
"sin",
"sino",
"sobre",
"sois",
"sola",
"solamente",
"solas",
"solo",
"solos",
"somos",
"son",
"soy",
"soyos",
"su",
"supuesto",
"sus",
"suya",
"suyas",
"suyo",
"sé",
"sí",
"sólo",
"t",
"tal",
"tambien",
"también",
"tampoco",
"tan",
"tanto",
"tarde",
"te",
"temprano",
"tendrá",
"tendrán",
"teneis",
"tenemos",
"tener",
"tenga",
"tengo",
"tenido",
"tenía",
"tercera",
"ti",
"tiempo",
"tiene",
"tienen",
"toda",
"todas",
"todavia",
"todavía",
"todo",
"todos",
"total",
"trabaja",
"trabajais",
"trabajamos",
"trabajan",
"trabajar",
"trabajas",
"trabajo",
"tras",
"trata",
"través",
"tres",
"tu",
"tus",
"tuvo",
"tuya",
"tuyas",
"tuyo",
"tuyos",
"tú",
"u",
"ultimo",
"un",
"una",
"unas",
"uno",
"unos",
"usa",
"usais",
"usamos",
"usan",
"usar",
"usas",
"uso",
"usted",
"ustedes",
"v",
"va",
"vais",
"valor",
"vamos",
"van",
"varias",
"varios",
"vaya",
"veces",
"ver",
"verdad",
"verdadera",
"verdadero",
"vez",
"vosotras",
"vosotros",
"voy",
"vuestra",
"vuestras",
"vuestro",
"vuestros",
"w",
"x",
"y",
"ya",
"yo",
"z",
"él",
"ésa",
"ésas",
"ése",
"ésos",
"ésta",
"éstas",
"éste",
"éstos",
"última",
"últimas",
"último",
"últimos",
],
"eu": [
"al",
"anitz",
"arabera",
"asko",
"baina",
"bat",
"batean",
"batek",
"bati",
"batzuei",
"batzuek",
"batzuetan",
"batzuk",
"bera",
"beraiek",
"berau",
"berauek",
"bere",
"berori",
"beroriek",
"beste",
"bezala",
"da",
"dago",
"dira",
"ditu",
"du",
"dute",
"edo",
"egin",
"ere",
"eta",
"eurak",
"ez",
"gainera",
"gu",
"gutxi",
"guzti",
"haiei",
"haiek",
"haietan",
"hainbeste",
"hala",
"han",
"handik",
"hango",
"hara",
"hari",
"hark",
"hartan",
"hau",
"hauei",
"hauek",
"hauetan",
"hemen",
"hemendik",
"hemengo",
"hi",
"hona",
"honek",
"honela",
"honetan",
"honi",
"hor",
"hori",
"horiei",
"horiek",
"horietan",
"horko",
"horra",
"horrek",
"horrela",
"horretan",
"horri",
"hortik",
"hura",
"izan",
"ni",
"noiz",
"nola",
"non",
"nondik",
"nongo",
"nor",
"nora",
"ze",
"zein",
"zen",
"zenbait",
"zenbat",
"zer",
"zergatik",
"ziren",
"zituen",
"zu",
"zuek",
"zuen",
"zuten",
],
"fr": [
"a",
"abord",
"absolument",
"afin",
"ah",
"ai",
"aie",
"ailleurs",
"ainsi",
"ait",
"allaient",
"allo",
"allons",
"allô",
"alors",
"anterieur",
"anterieure",
"anterieures",
"apres",
"après",
"as",
"assez",
"attendu",
"au",
"aucun",
"aucune",
"aujourd",
"aujourd'hui",
"aupres",
"auquel",
"aura",
"auraient",
"aurait",
"auront",
"aussi",
"autre",
"autrefois",
"autrement",
"autres",
"autrui",
"aux",
"auxquelles",
"auxquels",
"avaient",
"avais",
"avait",
"avant",
"avec",
"avoir",
"avons",
"ayant",
"b",
"bah",
"bas",
"basee",
"bat",
"beau",
"beaucoup",
"bien",
"bigre",
"boum",
"bravo",
"brrr",
"c",
"car",
"ce",
"ceci",
"cela",
"celle",
"celle-ci",
"celle-là",
"celles",
"celles-ci",
"celles-là",
"celui",
"celui-ci",
"celui-là",
"cent",
"cependant",
"certain",
"certaine",
"certaines",
"certains",
"certes",
"ces",
"cet",
"cette",
"ceux",
"ceux-ci",
"ceux-là",
"chacun",
"chacune",
"chaque",
"cher",
"chers",
"chez",
"chiche",
"chut",
"chère",
"chères",
"ci",
"cinq",
"cinquantaine",
"cinquante",
"cinquantième",
"cinquième",
"clac",
"clic",
"combien",
"comme",
"comment",
"comparable",
"comparables",
"compris",
"concernant",
"contre",
"couic",
"crac",
"d",
"da",
"dans",
"de",
"debout",
"dedans",
"dehors",
"deja",
"delà",
"depuis",
"dernier",
"derniere",
"derriere",
"derrière",
"des",
"desormais",
"desquelles",
"desquels",
"dessous",
"dessus",
"deux",
"deuxième",
"deuxièmement",
"devant",
"devers",
"devra",
"different",
"differentes",
"differents",
"différent",
"différente",
"différentes",
"différents",
"dire",
"directe",
"directement",
"dit",
"dite",
"dits",
"divers",
"diverse",
"diverses",
"dix",
"dix-huit",
"dix-neuf",
"dix-sept",
"dixième",
"doit",
"doivent",
"donc",
"dont",
"douze",
"douzième",
"dring",
"du",
"duquel",
"durant",
"dès",
"désormais",
"e",
"effet",
"egale",
"egalement",
"egales",
"eh",
"elle",
"elle-même",
"elles",
"elles-mêmes",
"en",
"encore",
"enfin",
"entre",
"envers",
"environ",
"es",
"est",
"et",
"etant",
"etc",
"etre",
"eu",
"euh",
"eux",
"eux-mêmes",
"exactement",
"excepté",
"extenso",
"exterieur",
"f",
"fais",
"faisaient",
"faisant",
"fait",
"façon",
"feront",
"fi",
"flac",
"floc",
"font",
"g",
"gens",
"h",
"ha",
"hein",
"hem",
"hep",
"hi",
"ho",
"holà",
"hop",
"hormis",
"hors",
"hou",
"houp",
"hue",
"hui",
"huit",
"huitième",
"hum",
"hurrah",
"hé",
"hélas",
"i",
"il",
"ils",
"importe",
"j",
"je",
"jusqu",
"jusque",
"juste",
"k",
"l",
"la",
"laisser",
"laquelle",
"las",
"le",
"lequel",
"les",
"lesquelles",
"lesquels",
"leur",
"leurs",
"longtemps",
"lors",
"lorsque",
"lui",
"lui-meme",
"lui-même",
"là",
"lès",
"m",
"ma",
"maint",
"maintenant",
"mais",
"malgre",
"malgré",
"maximale",
"me",
"meme",
"memes",
"merci",
"mes",
"mien",
"mienne",
"miennes",
"miens",
"mille",
"mince",
"minimale",
"moi",
"moi-meme",
"moi-même",
"moindres",
"moins",
"mon",
"moyennant",
"multiple",
"multiples",
"même",
"mêmes",
"n",
"na",
"naturel",
"naturelle",
"naturelles",
"ne",
"neanmoins",
"necessaire",
"necessairement",
"neuf",
"neuvième",
"ni",
"nombreuses",
"nombreux",
"non",
"nos",
"notamment",
"notre",
"nous",
"nous-mêmes",
"nouveau",
"nul",
"néanmoins",
"nôtre",
"nôtres",
"o",
"oh",
"ohé",
"ollé",
"olé",
"on",
"ont",
"onze",
"onzième",
"ore",
"ou",
"ouf",
"ouias",
"oust",
"ouste",
"outre",
"ouvert",
"ouverte",
"ouverts",
"o|",
"où",
"p",
"paf",
"pan",
"par",
"parce",
"parfois",
"parle",
"parlent",
"parler",
"parmi",
"parseme",
"partant",
"particulier",
"particulière",
"particulièrement",
"pas",
"passé",
"pendant",
"pense",
"permet",
"personne",
"peu",
"peut",
"peuvent",
"peux",
"pff",
"pfft",
"pfut",
"pif",
"pire",
"plein",
"plouf",
"plus",
"plusieurs",
"plutôt",
"possessif",
"possessifs",
"possible",
"possibles",
"pouah",
"pour",
"pourquoi",
"pourrais",
"pourrait",
"pouvait",
"prealable",
"precisement",
"premier",
"première",
"premièrement",
"pres",
"probable",
"probante",
"procedant",
"proche",
"près",
"psitt",
"pu",
"puis",
"puisque",
"pur",
"pure",
"q",
"qu",
"quand",
"quant",
"quant-à-soi",
"quanta",
"quarante",
"quatorze",
"quatre",
"quatre-vingt",
"quatrième",
"quatrièmement",
"que",
"quel",
"quelconque",
"quelle",
"quelles",
"quelqu'un",
"quelque",
"quelques",
"quels",
"qui",
"quiconque",
"quinze",
"quoi",
"quoique",
"r",
"rare",
"rarement",
"rares",
"relative",
"relativement",
"remarquable",
"rend",
"rendre",
"restant",
"reste",
"restent",
"restrictif",
"retour",
"revoici",
"revoilà",
"rien",
"s",
"sa",
"sacrebleu",
"sait",
"sans",
"sapristi",
"sauf",
"se",
"sein",
"seize",
"selon",
"semblable",
"semblaient",
"semble",
"semblent",
"sent",
"sept",
"septième",
"sera",
"seraient",
"serait",
"seront",
"ses",
"seul",
"seule",
"seulement",
"si",
"sien",
"sienne",
"siennes",
"siens",
"sinon",
"six",
"sixième",
"soi",
"soi-même",
"soit",
"soixante",
"son",
"sont",
"sous",
"souvent",
"specifique",
"specifiques",
"speculatif",
"stop",
"strictement",
"subtiles",
"suffisant",
"suffisante",
"suffit",
"suis",
"suit",
"suivant",
"suivante",
"suivantes",
"suivants",
"suivre",
"superpose",
"sur",
"surtout",
"t",
"ta",
"tac",
"tant",
"tardive",
"te",
"tel",
"telle",
"tellement",
"telles",
"tels",
"tenant",
"tend",
"tenir",
"tente",
"tes",
"tic",
"tien",
"tienne",
"tiennes",
"tiens",
"toc",
"toi",
"toi-même",
"ton",
"touchant",
"toujours",
"tous",
"tout",
"toute",
"toutefois",
"toutes",
"treize",
"trente",
"tres",
"trois",
"troisième",
"troisièmement",
"trop",
"très",
"tsoin",
"tsouin",
"tu",
"té",
"u",
"un",
"une",
"unes",
"uniformement",
"unique",
"uniques",
"uns",
"v",
"va",
"vais",
"vas",
"vers",
"via",
"vif",
"vifs",
"vingt",
"vivat",
"vive",
"vives",
"vlan",
"voici",
"voilà",
"vont",
"vos",
"votre",
"vous",
"vous-mêmes",
"vu",
"vé",
"vôtre",
"vôtres",
"w",
"x",
"y",
"z",
"zut",
"à",
"â",
"ça",
"ès",
"étaient",
"étais",
"était",
"étant",
"été",
"être",
"ô",
],
"hi": [
"अंदर",
"अत",
"अदि",
"अप",
"अपना",
"अपनि",
"अपनी",
"अपने",
"अभि",
"अभी",
"आदि",
"आप",
"इंहिं",
"इंहें",
"इंहों",
"इतयादि",
"इत्यादि",
"इन",
"इनका",
"इन्हीं",
"इन्हें",
"इन्हों",
"इस",
"इसका",
"इसकि",
"इसकी",
"इसके",
"इसमें",
"इसि",
"इसी",
"इसे",
"उंहिं",
"उंहें",
"उंहों",
"उन",
"उनका",
"उनकि",
"उनकी",
"उनके",
"उनको",
"उन्हीं",
"उन्हें",
"उन्हों",
"उस",
"उसके",
"उसि",
"उसी",
"उसे",
"एक",
"एवं",
"एस",
"एसे",
"ऐसे",
"ओर",
"और",
"कइ",
"कई",
"कर",
"करता",
"करते",
"करना",
"करने",
"करें",
"कहते",
"कहा",
"का",
"काफि",
"काफ़ी",
"कि",
"किंहें",
"किंहों",
"कितना",
"किन्हें",
"किन्हों",
"किया",
"किर",
"किस",
"किसि",
"किसी",
"किसे",
"की",
"कुछ",
"कुल",
"के",
"को",
"कोइ",
"कोई",
"कोन",
"कोनसा",
"कौन",
"कौनसा",
"गया",
"घर",
"जब",
"जहाँ",
"जहां",
"जा",
"जिंहें",
"जिंहों",
"जितना",
"जिधर",
"जिन",
"जिन्हें",
"जिन्हों",
"जिस",
"जिसे",
"जीधर",
"जेसा",
"जेसे",
"जैसा",
"जैसे",
"जो",
"तक",
"तब",
"तरह",
"तिंहें",
"तिंहों",
"तिन",
"तिन्हें",
"तिन्हों",
"तिस",
"तिसे",
"तो",
"था",
"थि",
"थी",
"थे",
"दबारा",
"दवारा",
"दिया",
"दुसरा",
"दुसरे",
"दूसरे",
"दो",
"द्वारा",
"न",
"नहिं",
"नहीं",
"ना",
"निचे",
"निहायत",
"नीचे",
"ने",
"पर",
"पहले",
"पुरा",
"पूरा",
"पे",
"फिर",
"बनि",
"बनी",
"बहि",
"बही",
"बहुत",
"बाद",
"बाला",
"बिलकुल",
"भि",
"भितर",
"भी",
"भीतर",
"मगर",
"मानो",
"मे",
"में",
"यदि",
"यह",
"यहाँ",
"यहां",
"यहि",
"यही",
"या",
"यिह",
"ये",
"रखें",
"रवासा",
"रहा",
"रहे",
"ऱ्वासा",
"लिए",
"लिये",
"लेकिन",
"व",
"वगेरह",
"वरग",
"वर्ग",
"वह",
"वहाँ",
"वहां",
"वहिं",
"वहीं",
"वाले",
"वुह",
"वे",
"वग़ैरह",
"संग",
"सकता",
"सकते",
"सबसे",
"सभि",
"सभी",
"साथ",
"साबुत",
"साभ",
"सारा",
"से",
"सो",
"हि",
"ही",
"हुअ",
"हुआ",
"हुइ",
"हुई",
"हुए",
"हे",
"हें",
"है",
"हैं",
"हो",
"होता",
"होति",
"होती",
"होते",
"होना",
"होने",
],
"id": [
"ada",
"adalah",
"adanya",
"adapun",
"agak",
"agaknya",
"agar",
"akan",
"akankah",
"akhirnya",
"aku",
"akulah",
"amat",
"amatlah",
"anda",
"andalah",
"antar",
"antara",
"antaranya",
"apa",
"apaan",
"apabila",
"apakah",
"apalagi",
"apatah",
"atau",
"ataukah",
"ataupun",
"bagai",
"bagaikan",
"bagaimana",
"bagaimanakah",
"bagaimanapun",
"bagi",
"bahkan",
"bahwa",
"bahwasanya",
"banyak",
"beberapa",
"begini",
"beginian",
"beginikah",
"beginilah",
"begitu",
"begitukah",
"begitulah",
"begitupun",
"belum",
"belumlah",
"berapa",
"berapakah",
"berapalah",
"berapapun",
"bermacam",
"bersama",
"betulkah",
"biasa",
"biasanya",
"bila",
"bilakah",
"bisa",
"bisakah",
"boleh",
"bolehkah",
"bolehlah",
"buat",
"bukan",
"bukankah",
"bukanlah",
"bukannya",
"cuma",
"dahulu",
"dalam",
"dan",
"dapat",
"dari",
"daripada",
"dekat",
"demi",
"demikian",
"demikianlah",
"dengan",
"depan",
"di",
"dia",
"dialah",
"diantara",
"diantaranya",
"dikarenakan",
"dini",
"diri",
"dirinya",
"disini",
"disinilah",
"dong",
"dulu",
"enggak",
"enggaknya",
"entah",
"entahlah",
"hal",
"hampir",
"hanya",
"hanyalah",
"harus",
"haruslah",
"harusnya",
"hendak",
"hendaklah",
"hendaknya",
"hingga",
"ia",
"ialah",
"ibarat",
"ingin",
"inginkah",
"inginkan",
"ini",
"inikah",
"inilah",
"itu",
"itukah",
"itulah",
"jangan",
"jangankan",
"janganlah",
"jika",
"jikalau",
"juga",
"justru",
"kala",
"kalau",
"kalaulah",
"kalaupun",
"kalian",
"kami",
"kamilah",
"kamu",
"kamulah",
"kan",
"kapan",
"kapankah",
"kapanpun",
"karena",
"karenanya",
"ke",
"kecil",
"kemudian",
"kenapa",
"kepada",
"kepadanya",
"ketika",
"khususnya",
"kini",
"kinilah",
"kiranya",
"kita",
"kitalah",
"kok",
"lagi",
"lagian",
"lah",
"lain",
"lainnya",
"lalu",
"lama",
"lamanya",
"lebih",
"macam",
"maka",
"makanya",
"makin",
"malah",
"malahan",
"mampu",
"mampukah",
"mana",
"manakala",
"manalagi",
"masih",
"masihkah",
"masing",
"mau",
"maupun",
"melainkan",
"melalui",
"memang",
"mengapa",
"mereka",
"merekalah",
"merupakan",
"meski",
"meskipun",
"mungkin",
"mungkinkah",
"nah",
"namun",
"nanti",
"nantinya",
"nyaris",
"oleh",
"olehnya",
"pada",
"padahal",
"padanya",
"paling",
"pantas",
"para",
"pasti",
"pastilah",
"per",
"percuma",
"pernah",
"pula",
"pun",
"rupanya",
"saat",
"saatnya",
"saja",
"sajalah",
"saling",
"sama",
"sambil",
"sampai",
"sana",
"sangat",
"sangatlah",
"saya",
"sayalah",
"se",
"sebab",
"sebabnya",
"sebagai",
"sebagaimana",
"sebagainya",
"sebaliknya",
"sebanyak",
"sebegini",
"sebegitu",
"sebelum",
"sebelumnya",
"sebenarnya",
"seberapa",
"sebetulnya",
"sebisanya",
"sebuah",
"sedang",
"sedangkan",
"sedemikian",
"sedikit",
"sedikitnya",
"segala",
"segalanya",
"segera",
"seharusnya",
"sehingga",
"sejak",
"sejenak",
"sekali",
"sekalian",
"sekaligus",
"sekalipun",
"sekarang",
"seketika",
"sekiranya",
"sekitar",
"sekitarnya",
"sela",
"selagi",
"selain",
"selaku",
"selalu",
"selama",
"selamanya",
"seluruh",
"seluruhnya",
"semacam",
"semakin",
"semasih",
"semaunya",
"sementara",
"sempat",
"semua",
"semuanya",
"semula",
"sendiri",
"sendirinya",
"seolah",
"seorang",
"sepanjang",
"sepantasnya",
"sepantasnyalah",
"seperti",
"sepertinya",
"sering",
"seringnya",
"serta",
"serupa",
"sesaat",
"sesama",
"sesegera",
"sesekali",
"seseorang",
"sesuatu",
"sesuatunya",
"sesudah",
"sesudahnya",
"setelah",
"seterusnya",
"setiap",
"setidaknya",
"sewaktu",
"siapa",
"siapakah",
"siapapun",
"sini",
"sinilah",
"suatu",
"sudah",
"sudahkah",
"sudahlah",
"supaya",
"tadi",
"tadinya",
"tak",
"tanpa",
"tapi",
"telah",
"tentang",
"tentu",
"tentulah",
"tentunya",
"terdiri",
"terhadap",
"terhadapnya",
"terlalu",
"terlebih",
"tersebut",
"tersebutlah",
"tertentu",
"tetapi",
"tiap",
"tidak",
"tidakkah",
"tidaklah",
"toh",
"waduh",
"wah",
"wahai",
"walau",
"walaupun",
"wong",
"yaitu",
"yakni",
"yang",
],
"mr": [
"अधिक",
"अनेक",
"अशी",
"असलयाचे",
"असलेल्या",
"असा",
"असून",
"असे",
"आज",
"आणि",
"आता",
"आपल्या",
"आला",
"आली",
"आले",
"आहे",
"आहेत",
"एक",
"एका",
"कमी",
"करणयात",
"करून",
"का",
"काम",
"काय",
"काही",
"किवा",
"की",
"केला",
"केली",
"केले",
"कोटी",
"गेल्या",
"घेऊन",
"जात",
"झाला",
"झाली",
"झाले",
"झालेल्या",
"टा",
"डॉ",
"तर",
"तरी",
"तसेच",
"ता",
"ती",
"तीन",
"ते",
"तो",
"त्या",
"त्याचा",
"त्याची",
"त्याच्या",
"त्याना",
"त्यानी",
"त्यामुळे",
"त्री",
"दिली",
"दोन",
"न",
"नाही",
"निर्ण्य",
"पण",
"पम",
"परयतन",
"पाटील",
"म",
"मात्र",
"माहिती",
"मी",
"मुबी",
"म्हणजे",
"म्हणाले",
"म्हणून",
"या",
"याचा",
"याची",
"याच्या",
"याना",
"यानी",
"येणार",
"येत",
"येथील",
"येथे",
"लाख",
"व",
"व्यकत",
"सर्व",
"सागित्ले",
"सुरू",
"हजार",
"हा",
"ही",
"हे",
"होणार",
"होत",
"होता",
"होती",
"होते",
],
"pt": [
"a",
"acerca",
"adeus",
"agora",
"ainda",
"algmas",
"algo",
"algumas",
"alguns",
"ali",
"além",
"ambos",
"ano",
"anos",
"antes",
"ao",
"aos",
"apenas",
"apoio",
"apontar",
"após",
"aquela",
"aquelas",
"aquele",
"aqueles",
"aqui",
"aquilo",
"as",
"assim",
"através",
"atrás",
"até",
"aí",
"baixo",
"bastante",
"bem",
"bom",
"breve",
"cada",
"caminho",
"catorze",
"cedo",
"cento",
"certamente",
"certeza",
"cima",
"cinco",
"coisa",
"com",
"como",
"comprido",
"conhecido",
"conselho",
"contra",
"corrente",
"custa",
"cá",
"da",
"daquela",
"daquele",
"dar",
"das",
"de",
"debaixo",
"demais",
"dentro",
"depois",
"desde",
"desligado",
"dessa",
"desse",
"desta",
"deste",
"deve",
"devem",
"deverá",
"dez",
"dezanove",
"dezasseis",
"dezassete",
"dezoito",
"dia",
"diante",
"direita",
"diz",
"dizem",
"dizer",
"do",
"dois",
"dos",
"doze",
"duas",
"dá",
"dão",
"dúvida",
"e",
"ela",
"elas",
"ele",
"eles",
"em",
"embora",
"enquanto",
"entre",
"então",
"era",
"essa",
"essas",
"esse",
"esses",
"esta",
"estado",
"estar",
"estará",
"estas",
"estava",
"este",
"estes",
"esteve",
"estive",
"estivemos",
"estiveram",
"estiveste",
"estivestes",
"estou",
"está",
"estás",
"estão",
"eu",
"exemplo",
"falta",
"fará",
"favor",
"faz",
"fazeis",
"fazem",
"fazemos",
"fazer",
"fazes",
"fazia",
"faço",
"fez",
"fim",
"final",
"foi",
"fomos",
"for",
"fora",
"foram",
"forma",
"foste",
"fostes",
"fui",
"geral",
"grande",
"grandes",
"grupo",
"hoje",
"horas",
"há",
"iniciar",
"inicio",
"ir",
"irá",
"isso",
"ista",
"iste",
"isto",
"já",
"lado",
"ligado",
"local",
"logo",
"longe",
"lugar",
"lá",
"maior",
"maioria",
"maiorias",
"mais",
"mal",
"mas",
"me",
"meio",
"menor",
"menos",
"meses",
"mesmo",
"meu",
"meus",
"mil",
"minha",
"minhas",
"momento",
"muito",
"muitos",
"máximo",
"mês",
"na",
"nada",
"naquela",
"naquele",
"nas",
"nem",
"nenhuma",
"nessa",
"nesse",
"nesta",
"neste",
"no",
"noite",
"nome",
"nos",
"nossa",
"nossas",
"nosso",
"nossos",
"nova",
"nove",
"novo",
"novos",
"num",
"numa",
"nunca",
"não",
"nível",
"nós",
"número",
"o",
"obra",
"obrigada",
"obrigado",
"oitava",
"oitavo",
"oito",
"onde",
"ontem",
"onze",
"os",
"ou",
"outra",
"outras",
"outro",
"outros",
"para",
"parece",
"parte",
"partir",
"pegar",
"pela",
"pelas",
"pelo",
"pelos",
"perto",
"pessoas",
"pode",
"podem",
"poder",
"poderá",
"podia",
"ponto",
"pontos",
"por",
"porque",
"porquê",
"posição",
"possivelmente",
"posso",
"possível",
"pouca",
"pouco",
"povo",
"primeira",
"primeiro",
"promeiro",
"próprio",
"próximo",
"puderam",
"pôde",
"põe",
"põem",
"qual",
"qualquer",
"quando",
"quanto",
"quarta",
"quarto",
"quatro",
"que",
"quem",
"quer",
"quero",
"questão",
"quieto",
"quinta",
"quinto",
"quinze",
"quê",
"relação",
"sabe",
"saber",
"se",
"segunda",
"segundo",
"sei",
"seis",
"sem",
"sempre",
"ser",
"seria",
"sete",
"seu",
"seus",
"sexta",
"sexto",
"sim",
"sistema",
"sob",
"sobre",
"sois",
"somente",
"somos",
"sou",
"sua",
"suas",
"são",
"sétima",
"sétimo",
"tal",
"talvez",
"também",
"tanto",
"tarde",
"te",
"tem",
"temos",
"tempo",
"tendes",
"tenho",
"tens",
"tentar",
"tentaram",
"tente",
"tentei",
"ter",
"terceira",
"terceiro",
"teu",
"teus",
"teve",
"tipo",
"tive",
"tivemos",
"tiveram",
"tiveste",
"tivestes",
"toda",
"todas",
"todo",
"todos",
"trabalhar",
"trabalho",
"treze",
"três",
"tu",
"tua",
"tuas",
"tudo",
"tão",
"têm",
"um",
"uma",
"umas",
"uns",
"usa",
"usar",
"vai",
"vais",
"valor",
"veja",
"vem",
"vens",
"ver",
"verdade",
"verdadeiro",
"vez",
"vezes",
"viagem",
"vindo",
"vinte",
"você",
"vocês",
"vos",
"vossa",
"vossas",
"vosso",
"vossos",
"vários",
"vão",
"vêm",
"vós",
"zero",
"à",
"às",
"área",
"é",
"és",
"último",
],
"sw": [
"akasema",
"alikuwa",
"alisema",
"baada",
"basi",
"bila",
"cha",
"chini",
"hadi",
"hapo",
"hata",
"hivyo",
"hiyo",
"huku",
"huo",
"ili",
"ilikuwa",
"juu",
"kama",
"karibu",
"katika",
"kila",
"kima",
"kisha",
"kubwa",
"kutoka",
"kuwa",
"kwa",
"kwamba",
"kwenda",
"kwenye",
"la",
"lakini",
"mara",
"mdogo",
"mimi",
"mkubwa",
"mmoja",
"moja",
"muda",
"mwenye",
"na",
"naye",
"ndani",
"ng",
"ni",
"nini",
"nonkungu",
"pamoja",
"pia",
"sana",
"sasa",
"sauti",
"tafadhali",
"tena",
"tu",
"vile",
"wa",
"wakati",
"wake",
"walikuwa",
"wao",
"watu",
"wengine",
"wote",
"ya",
"yake",
"yangu",
"yao",
"yeye",
"yule",
"za",
"zaidi",
"zake",
],
"ur": [
"آئی",
"آئے",
"آج",
"آخر",
"آخرکبر",
"آدهی",
"آًب",
"آٹھ",
"آیب",
"اة",
"اخبزت",
"اختتبم",
"ادھر",
"ارد",
"اردگرد",
"ارکبى",
"اش",
"اضتعوبل",
"اضتعوبلات",
"اضطرذ",
"اضکب",
"اضکی",
"اضکے",
"اطراف",
"اغیب",
"افراد",
"الگ",
"اور",
"اوًچب",
"اوًچبئی",
"اوًچی",
"اوًچے",
"اى",
"اً",
"اًذر",
"اًہیں",
"اٹھبًب",
"اپٌب",
"اپٌے",
"اچھب",
"اچھی",
"اچھے",
"اکثر",
"اکٹھب",
"اکٹھی",
"اکٹھے",
"اکیلا",
"اکیلی",
"اکیلے",
"اگرچہ",
"اہن",
"ایطے",
"ایک",
"ب",
"ت",
"تبزٍ",
"تت",
"تر",
"ترتیت",
"تریي",
"تعذاد",
"تن",
"تو",
"توبم",
"توہی",
"توہیں",
"تٌہب",
"تک",
"تھب",
"تھوڑا",
"تھوڑی",
"تھوڑے",
"تھی",
"تھے",
"تیي",
"ثب",
"ثبئیں",
"ثبترتیت",
"ثبری",
"ثبرے",
"ثبعث",
"ثبلا",
"ثبلترتیت",
"ثبہر",
"ثدبئے",
"ثرآں",
"ثراں",
"ثرش",
"ثعذ",
"ثغیر",
"ثلٌذ",
"ثلٌذوثبلا",
"ثلکہ",
"ثي",
"ثٌب",
"ثٌبرہب",
"ثٌبرہی",
"ثٌبرہے",
"ثٌبًب",
"ثٌذ",
"ثٌذکرو",
"ثٌذکرًب",
"ثٌذی",
"ثڑا",
"ثڑوں",
"ثڑی",
"ثڑے",
"ثھر",
"ثھرا",
"ثھراہوا",
"ثھرپور",
"ثھی",
"ثہت",
"ثہتر",
"ثہتری",
"ثہتریي",
"ثیچ",
"ج",
"خب",
"خبرہب",
"خبرہی",
"خبرہے",
"خبهوظ",
"خبًب",
"خبًتب",
"خبًتی",
"خبًتے",
"خبًٌب",
"خت",
"ختن",
"خجکہ",
"خص",
"خططرذ",
"خلذی",
"خو",
"خواى",
"خوًہی",
"خوکہ",
"خٌبة",
"خگہ",
"خگہوں",
"خگہیں",
"خیطب",
"خیطبکہ",
"در",
"درخبت",
"درخہ",
"درخے",
"درزقیقت",
"درضت",
"دش",
"دفعہ",
"دلچطپ",
"دلچطپی",
"دلچطپیبں",
"دو",
"دور",
"دوراى",
"دوضرا",
"دوضروں",
"دوضری",
"دوضرے",
"دوًوں",
"دکھبئیں",
"دکھبتب",
"دکھبتی",
"دکھبتے",
"دکھبو",
"دکھبًب",
"دکھبیب",
"دی",
"دیب",
"دیتب",
"دیتی",
"دیتے",
"دیر",
"دیٌب",
"دیکھو",
"دیکھٌب",
"دیکھی",
"دیکھیں",
"دے",
"ر",
"راضتوں",
"راضتہ",
"راضتے",
"رریعہ",
"رریعے",
"رکي",
"رکھ",
"رکھب",
"رکھتب",
"رکھتبہوں",
"رکھتی",
"رکھتے",
"رکھی",
"رکھے",
"رہب",
"رہی",
"رہے",
"ز",
"زبصل",
"زبضر",
"زبل",
"زبلات",
"زبلیہ",
"زصوں",
"زصہ",
"زصے",
"زقبئق",
"زقیتیں",
"زقیقت",
"زکن",
"زکویہ",
"زیبدٍ",
"صبف",
"صسیر",
"صفر",
"صورت",
"صورتسبل",
"صورتوں",
"صورتیں",
"ض",
"ضبت",
"ضبتھ",
"ضبدٍ",
"ضبرا",
"ضبرے",
"ضبل",
"ضبلوں",
"ضت",
"ضرور",
"ضرورت",
"ضروری",
"ضلطلہ",
"ضوچ",
"ضوچب",
"ضوچتب",
"ضوچتی",
"ضوچتے",
"ضوچو",
"ضوچٌب",
"ضوچی",
"ضوچیں",
"ضکب",
"ضکتب",
"ضکتی",
"ضکتے",
"ضکٌب",
"ضکی",
"ضکے",
"ضیذھب",
"ضیذھی",
"ضیذھے",
"ضیکٌڈ",
"ضے",
"طرف",
"طریق",
"طریقوں",
"طریقہ",
"طریقے",
"طور",
"طورپر",
"ظبہر",
"ع",
"عذد",
"عظین",
"علاقوں",
"علاقہ",
"علاقے",
"علاوٍ",
"عووهی",
"غبیذ",
"غخص",
"غذ",
"غروع",
"غروعبت",
"غے",
"فرد",
"فی",
"ق",
"قجل",
"قجیلہ",
"قطن",
"لئے",
"لا",
"لازهی",
"لو",
"لوجب",
"لوجی",
"لوجے",
"لوسبت",
"لوسہ",
"لوگ",
"لوگوں",
"لڑکپي",
"لگتب",
"لگتی",
"لگتے",
"لگٌب",
"لگی",
"لگیں",
"لگے",
"لی",
"لیب",
"لیٌب",
"لیں",
"لے",
"ه",
"هتعلق",
"هختلف",
"هسترم",
"هسترهہ",
"هسطوش",
"هسیذ",
"هطئلہ",
"هطئلے",
"هطبئل",
"هطتعول",
"هطلق",
"هعلوم",
"هػتول",
"هلا",
"هوکي",
"هوکٌبت",
"هوکٌہ",
"هٌبضت",
"هڑا",
"هڑًب",
"هڑے",
"هکول",
"هگر",
"هہرثبى",
"هیرا",
"هیری",
"هیرے",
"هیں",
"و",
"وار",
"والے",
"وٍ",
"ًئی",
"ًئے",
"ًب",
"ًبپطٌذ",
"ًبگسیر",
"ًطجت",
"ًقطہ",
"ًو",
"ًوخواى",
"ًکبلٌب",
"ًکتہ",
"ًہ",
"ًہیں",
"ًیب",
"ًے",
"ٓ آش",
"ٹھیک",
"پبئے",
"پبش",
"پبًب",
"پبًچ",
"پر",
"پراًب",
"پطٌذ",
"پل",
"پورا",
"پوچھب",
"پوچھتب",
"پوچھتی",
"پوچھتے",
"پوچھو",
"پوچھوں",
"پوچھٌب",
"پوچھیں",
"پچھلا",
"پھر",
"پہلا",
"پہلی",
"پہلےضی",
"پہلےضے",
"پہلےضےہی",
"پیع",
"چبر",
"چبہب",
"چبہٌب",
"چبہے",
"چلا",
"چلو",
"چلیں",
"چلے",
"چکب",
"چکی",
"چکیں",
"چکے",
"چھوٹب",
"چھوٹوں",
"چھوٹی",
"چھوٹے",
"چھہ",
"چیسیں",
"ڈھوًڈا",
"ڈھوًڈلیب",
"ڈھوًڈو",
"ڈھوًڈًب",
"ڈھوًڈی",
"ڈھوًڈیں",
"ک",
"کئی",
"کئے",
"کب",
"کبفی",
"کبم",
"کت",
"کجھی",
"کرا",
"کرتب",
"کرتبہوں",
"کرتی",
"کرتے",
"کرتےہو",
"کررہب",
"کررہی",
"کررہے",
"کرو",
"کرًب",
"کریں",
"کرے",
"کطی",
"کل",
"کن",
"کوئی",
"کوتر",
"کورا",
"کوروں",
"کورٍ",
"کورے",
"کوطي",
"کوى",
"کوًطب",
"کوًطی",
"کوًطے",
"کھولا",
"کھولو",
"کھولٌب",
"کھولی",
"کھولیں",
"کھولے",
"کہ",
"کہب",
"کہتب",
"کہتی",
"کہتے",
"کہو",
"کہوں",
"کہٌب",
"کہی",
"کہیں",
"کہے",
"کی",
"کیب",
"کیطب",
"کیطرف",
"کیطے",
"کیلئے",
"کیوًکہ",
"کیوں",
"کیے",
"کے",
"کےثعذ",
"کےرریعے",
"گئی",
"گئے",
"گب",
"گرد",
"گروٍ",
"گروپ",
"گروہوں",
"گٌتی",
"گی",
"گیب",
"گے",
"ہر",
"ہن",
"ہو",
"ہوئی",
"ہوئے",
"ہوا",
"ہوبرا",
"ہوبری",
"ہوبرے",
"ہوتب",
"ہوتی",
"ہوتے",
"ہورہب",
"ہورہی",
"ہورہے",
"ہوضکتب",
"ہوضکتی",
"ہوضکتے",
"ہوًب",
"ہوًی",
"ہوًے",
"ہوچکب",
"ہوچکی",
"ہوچکے",
"ہوگئی",
"ہوگئے",
"ہوگیب",
"ہوں",
"ہی",
"ہیں",
"ہے",
"ی",
"یقیٌی",
"یہ",
"یہبں",
],
"vi": [
"a ha",
"a-lô",
"ai",
"ai ai",
"ai nấy",
"alô",
"amen",
"anh",
"bao giờ",
"bao lâu",
"bao nhiêu",
"bao nả",
"bay biến",
"biết",
"biết bao",
"biết bao nhiêu",
"biết chừng nào",
"biết mấy",
"biết đâu",
"biết đâu chừng",
"biết đâu đấy",
"bà",
"bài",
"bác",
"bây bẩy",
"bây chừ",
"bây giờ",
"bây nhiêu",
"bèn",
"béng",
"bông",
"bạn",
"bản",
"bất chợt",
"bất cứ",
"bất giác",
"bất kì",
"bất kể",
"bất kỳ",
"bất luận",
"bất nhược",
"bất quá",
"bất thình lình",
"bất tử",
"bất đồ",
"bấy",
"bấy chầy",
"bấy chừ",
"bấy giờ",
"bấy lâu",
"bấy lâu nay",
"bấy nay",
"bấy nhiêu",
"bập bà bập bõm",
"bập bõm",
"bắt đầu từ",
"bằng",
"bằng không",
"bằng nấy",
"bằng ấy",
"bển",
"bệt",
"bị",
"bỏ mẹ",
"bỗng",
"bỗng chốc",
"bỗng dưng",
"bỗng không",
"bỗng nhiên",
"bỗng đâu",
"bộ",
"bội phần",
"bớ",
"bởi",
"bởi chưng",
"bởi nhưng",
"bởi thế",
"bởi vì",
"bởi vậy",
"bức",
"cao",
"cha",
"cha chả",
"chao ôi",
"chiếc",
"cho",
"cho nên",
"cho tới",
"cho tới khi",
"cho đến",
"cho đến khi",
"choa",
"chu cha",
"chui cha",
"chung cục",
"chung qui",
"chung quy",
"chung quy lại",
"chuyện",
"chành chạnh",
"chí chết",
"chính",
"chính là",
"chính thị",
"chùn chùn",
"chùn chũn",
"chú",
"chú mày",
"chú mình",
"chúng mình",
"chúng ta",
"chúng tôi",
"chăn chắn",
"chăng",
"chưa",
"chầm chập",
"chậc",
"chắc",
"chắc hẳn",
"chẳng lẽ",
"chẳng những",
"chẳng nữa",
"chẳng phải",
"chết nỗi",
"chết thật",
"chết tiệt",
"chỉ",
"chỉn",
"chốc chốc",
"chớ",
"chớ chi",
"chợt",
"chủn",
"chứ",
"chứ lị",
"coi bộ",
"coi mòi",
"con",
"cu cậu",
"cuốn",
"cuộc",
"càng",
"các",
"cái",
"cây",
"còn",
"có",
"có chăng là",
"có dễ",
"có thể",
"có vẻ",
"cóc khô",
"cô",
"cô mình",
"công nhiên",
"cùng",
"cùng cực",
"cùng nhau",
"cùng với",
"căn",
"căn cắt",
"cũng",
"cũng như",
"cũng vậy",
"cũng vậy thôi",
"cơ",
"cơ chừng",
"cơ hồ",
"cơ mà",
"cơn",
"cả",
"cả thảy",
"cả thể",
"cảm ơn",
"cần",
"cật lực",
"cật sức",
"cậu",
"cổ lai",
"của",
"cứ",
"cứ việc",
"cực lực",
"do",
"do vì",
"do vậy",
"do đó",
"duy",
"dào",
"dì",
"dù cho",
"dù rằng",
"dưới",
"dạ",
"dần dà",
"dần dần",
"dầu sao",
"dẫu",
"dẫu sao",
"dễ sợ",
"dễ thường",
"dở chừng",
"dữ",
"em",
"giữa",
"gì",
"hay",
"hoàn toàn",
"hoặc",
"hơn",
"hầu hết",
"họ",
"hỏi",
"khi",
"khác",
"không",
"luôn",
"là",
"làm",
"lên",
"lúc",
"lại",
"lần",
"lớn",
"muốn",
"mà",
"mình",
"mỗi",
"một",
"một cách",
"mới",
"mợ",
"ngay",
"ngay cả",
"ngay khi",
"ngay lúc",
"ngay lập tức",
"ngay tức khắc",
"ngay từ",
"nghe chừng",
"nghe đâu",
"nghen",
"nghiễm nhiên",
"nghỉm",
"ngoài",
"ngoài ra",
"ngoải",
"ngày",
"ngày càng",
"ngày ngày",
"ngày xưa",
"ngày xửa",
"ngôi",
"ngõ hầu",
"ngăn ngắt",
"ngươi",
"người",
"ngọn",
"ngọt",
"ngộ nhỡ",
"nh",
"nhau",
"nhiên hậu",
"nhiều",
"nhiệt liệt",
"nhung nhăng",
"nhà",
"nhân dịp",
"nhân tiện",
"nhé",
"nhón nhén",
"như",
"như chơi",
"như không",
"như quả",
"như thể",
"như tuồng",
"như vậy",
"nhưng",
"nhưng mà",
"nhược bằng",
"nhất",
"nhất loạt",
"nhất luật",
"nhất mực",
"nhất nhất",
"nhất quyết",
"nhất sinh",
"nhất thiết",
"nhất tâm",
"nhất tề",
"nhất đán",
"nhất định",
"nhận",
"nhỉ",
"nhỡ ra",
"những",
"những ai",
"những như",
"nào",
"này",
"nên",
"nên chi",
"nó",
"nóc",
"nói",
"năm",
"nơi",
"nấy",
"nếu",
"nếu như",
"nền",
"nọ",
"nớ",
"nức nở",
"nữa",
"oai oái",
"oái",
"pho",
"phè",
"phóc",
"phót",
"phăn phắt",
"phương chi",
"phải",
"phải chi",
"phải chăng",
"phắt",
"phỉ phui",
"phỏng",
"phỏng như",
"phốc",
"phụt",
"phứt",
"qua",
"qua quít",
"qua quýt",
"quyết",
"quyết nhiên",
"quyển",
"quá",
"quá chừng",
"quá lắm",
"quá sá",
"quá thể",
"quá trời",
"quá xá",
"quá đỗi",
"quá độ",
"quá ư",
"quý hồ",
"quả",
"quả là",
"quả tang",
"quả thật",
"quả tình",
"quả vậy",
"quả đúng",
"ra",
"ra phết",
"ra sao",
"ra trò",
"ren rén",
"riu ríu",
"riêng",
"riệt",
"rày",
"ráo",
"ráo trọi",
"rén",
"rích",
"rón rén",
"rút cục",
"răng",
"rất",
"rằng",
"rằng là",
"rốt cuộc",
"rốt cục",
"rồi",
"rứa",
"sa sả",
"sao",
"sau",
"sau chót",
"sau cuối",
"sau cùng",
"sau đó",
"so",
"song le",
"suýt",
"sì",
"sạch",
"sất",
"sắp",
"sẽ",
"số",
"số là",
"sốt sột",
"sở dĩ",
"sự",
"tanh",
"tha hồ",
"than ôi",
"thanh",
"theo",
"thi thoảng",
"thoạt",
"thoạt nhiên",
"thoắt",
"thuần",
"thà",
"thà là",
"thà rằng",
"thành ra",
"thành thử",
"thái quá",
"tháng",
"thì",
"thì thôi",
"thình lình",
"thím",
"thôi",
"thúng thắng",
"thương ôi",
"thường",
"thảo hèn",
"thảo nào",
"thấy",
"thẩy",
"thậm",
"thậm chí",
"thật lực",
"thật ra",
"thật vậy",
"thế",
"thế là",
"thế mà",
"thế nào",
"thế nên",
"thế ra",
"thế thì",
"thế à",
"thếch",
"thỉnh thoảng",
"thỏm",
"thốc",
"thốc tháo",
"thốt",
"thốt nhiên",
"thộc",
"thời gian",
"thục mạng",
"thửa",
"thực ra",
"thực sự",
"thực vậy",
"tiếp theo",
"tiếp đó",
"tiện thể",
"toà",
"toé khói",
"toẹt",
"trong",
"trên",
"trước",
"trước kia",
"trước nay",
"trước tiên",
"trước đây",
"trước đó",
"trếu tráo",
"trển",
"trệt",
"trệu trạo",
"trỏng",
"trời đất ơi",
"trừ phi",
"tuy",
"tuy nhiên",
"tuy rằng",
"tuy thế",
"tuy vậy",
"tuyệt nhiên",
"tuần tự",
"tuốt luốt",
"tuốt tuồn tuột",
"tuốt tuột",
"tà tà",
"tênh",
"tít mù",
"tò te",
"tôi",
"tông tốc",
"tù tì",
"tăm tắp",
"tại",
"tại vì",
"tấm",
"tấn",
"tất cả",
"tất thảy",
"tất tần tật",
"tất tật",
"tắp",
"tắp lự",
"tọt",
"tỏ ra",
"tỏ vẻ",
"tốc tả",
"tối ư",
"tột",
"tớ",
"tới",
"tức thì",
"tức tốc",
"từ",
"từng",
"tự vì",
"tựu trung",
"veo",
"veo veo",
"việc",
"vung thiên địa",
"vung tàn tán",
"vung tán tàn",
"và",
"vào",
"vâng",
"vèo",
"vì",
"vì chưng",
"vì thế",
"vì vậy",
"ví bằng",
"ví dù",
"ví phỏng",
"ví thử",
"vô hình trung",
"vô kể",
"vô luận",
"vô vàn",
"văng tê",
"vạn nhất",
"vả chăng",
"vả lại",
"vẫn",
"vậy",
"vậy là",
"vậy thì",
"về",
"vị tất",
"vốn dĩ",
"với",
"với lại",
"vở",
"vụt",
"vừa",
"vừa mới",
"xa xả",
"xiết bao",
"xon xón",
"xoành xoạch",
"xoét",
"xoẳn",
"xoẹt",
"xuất kì bất ý",
"xuất kỳ bất ý",
"xuể",
"xuống",
"xăm xúi",
"xăm xăm",
"xăm xắm",
"xềnh xệch",
"xệp",
"à",
"à ơi",
"ào",
"á",
"á à",
"ái",
"ái chà",
"ái dà",
"áng",
"âu là",
"ô hay",
"ô hô",
"ô kê",
"ô kìa",
"ôi chao",
"ôi thôi",
"ông",
"úi",
"úi chà",
"úi dào",
"ý",
"ý chừng",
"ý da",
"đang",
"đi",
"điều",
"đành đạch",
"đáng lí",
"đáng lý",
"đáng lẽ",
"đánh đùng",
"đáo để",
"đây",
"đã",
"đó",
"được",
"đại loại",
"đại nhân",
"đại phàm",
"đại để",
"đến",
"đến nỗi",
"đều",
"để",
"ơ",
"ơ hay",
"ơ kìa",
"ơi",
"ư",
"ạ",
"ạ ơi",
"ấy",
"ầu ơ",
"ắt",
"ắt hẳn",
"ắt là",
"ối dào",
"ối giời",
"ối giời ơi",
"ồ",
"ổng",
"ớ",
"ờ",
"ở",
"ở trên",
"ủa",
"ứ hự",
"ứ ừ",
"ừ",
"ử",
],
"yo": [
"a",
"an",
"bá",
"bí",
"bẹ̀rẹ̀",
"fún",
"fẹ́",
"gbogbo",
"inú",
"jù",
"jẹ",
"jẹ́",
"kan",
"kì",
"kí",
"kò",
"láti",
"lè",
"lọ",
"mi",
"mo",
"máa",
"mọ̀",
"ni",
"náà",
"ní",
"nígbà",
"nítorí",
"nǹkan",
"o",
"padà",
"pé",
"púpọ̀",
"pẹ̀lú",
"rẹ̀",
"sì",
"sí",
"sínú",
"ṣ",
"ti",
"tí",
"wà",
"wá",
"wọn",
"wọ́n",
"yìí",
"àti",
"àwọn",
"é",
"í",
"òun",
"ó",
"ń",
"ńlá",
"ṣe",
"ṣé",
"ṣùgbọ́n",
"ẹmọ́",
"ọjọ́",
"ọ̀pọ̀lọpọ̀",
],
"zh": [
"、",
"。",
"〈",
"〉",
"《",
"》",
"一",
"一切",
"一则",
"一方面",
"一旦",
"一来",
"一样",
"一般",
"七",
"万一",
"三",
"上下",
"不仅",
"不但",
"不光",
"不单",
"不只",
"不如",
"不怕",
"不惟",
"不成",
"不拘",
"不比",
"不然",
"不特",
"不独",
"不管",
"不论",
"不过",
"不问",
"与",
"与其",
"与否",
"与此同时",
"且",
"两者",
"个",
"临",
"为",
"为了",
"为什么",
"为何",
"为着",
"乃",
"乃至",
"么",
"之",
"之一",
"之所以",
"之类",
"乌乎",
"乎",
"乘",
"九",
"也",
"也好",
"也罢",
"了",
"二",
"于",
"于是",
"于是乎",
"云云",
"五",
"人家",
"什么",
"什么样",
"从",
"从而",
"他",
"他人",
"他们",
"以",
"以便",
"以免",
"以及",
"以至",
"以至于",
"以致",
"们",
"任",
"任何",
"任凭",
"似的",
"但",
"但是",
"何",
"何况",
"何处",
"何时",
"作为",
"你",
"你们",
"使得",
"例如",
"依",
"依照",
"俺",
"俺们",
"倘",
"倘使",
"倘或",
"倘然",
"倘若",
"借",
"假使",
"假如",
"假若",
"像",
"八",
"六",
"兮",
"关于",
"其",
"其一",
"其中",
"其二",
"其他",
"其余",
"其它",
"其次",
"具体地说",
"具体说来",
"再者",
"再说",
"冒",
"冲",
"况且",
"几",
"几时",
"凭",
"凭借",
"则",
"别",
"别的",
"别说",
"到",
"前后",
"前者",
"加之",
"即",
"即令",
"即使",
"即便",
"即或",
"即若",
"又",
"及",
"及其",
"及至",
"反之",
"反过来",
"反过来说",
"另",
"另一方面",
"另外",
"只是",
"只有",
"只要",
"只限",
"叫",
"叮咚",
"可",
"可以",
"可是",
"可见",
"各",
"各个",
"各位",
"各种",
"各自",
"同",
"同时",
"向",
"向着",
"吓",
"吗",
"否则",
"吧",
"吧哒",
"吱",
"呀",
"呃",
"呕",
"呗",
"呜",
"呜呼",
"呢",
"呵",
"呸",
"呼哧",
"咋",
"和",
"咚",
"咦",
"咱",
"咱们",
"咳",
"哇",
"哈",
"哈哈",
"哉",
"哎",
"哎呀",
"哎哟",
"哗",
"哟",
"哦",
"哩",
"哪",
"哪个",
"哪些",
"哪儿",
"哪天",
"哪年",
"哪怕",
"哪样",
"哪边",
"哪里",
"哼",
"哼唷",
"唉",
"啊",
"啐",
"啥",
"啦",
"啪达",
"喂",
"喏",
"喔唷",
"嗡嗡",
"嗬",
"嗯",
"嗳",
"嘎",
"嘎登",
"嘘",
"嘛",
"嘻",
"嘿",
"四",
"因",
"因为",
"因此",
"因而",
"固然",
"在",
"在下",
"地",
"多",
"多少",
"她",
"她们",
"如",
"如上所述",
"如何",
"如其",
"如果",
"如此",
"如若",
"宁",
"宁可",
"宁愿",
"宁肯",
"它",
"它们",
"对",
"对于",
"将",
"尔后",
"尚且",
"就",
"就是",
"就是说",
"尽",
"尽管",
"岂但",
"己",
"并",
"并且",
"开外",
"开始",
"归",
"当",
"当着",
"彼",
"彼此",
"往",
"待",
"得",
"怎",
"怎么",
"怎么办",
"怎么样",
"怎样",
"总之",
"总的来看",
"总的来说",
"总的说来",
"总而言之",
"恰恰相反",
"您",
"慢说",
"我",
"我们",
"或",
"或是",
"或者",
"所",
"所以",
"打",
"把",
"抑或",
"拿",
"按",
"按照",
"换句话说",
"换言之",
"据",
"接着",
"故",
"故此",
"旁人",
"无宁",
"无论",
"既",
"既是",
"既然",
"时候",
"是",
"是的",
"替",
"有",
"有些",
"有关",
"有的",
"望",
"朝",
"朝着",
"本",
"本着",
"来",
"来着",
"极了",
"果然",
"果真",
"某",
"某个",
"某些",
"根据",
"正如",
"此",
"此外",
"此间",
"毋宁",
"每",
"每当",
"比",
"比如",
"比方",
"沿",
"沿着",
"漫说",
"焉",
"然则",
"然后",
"然而",
"照",
"照着",
"甚么",
"甚而",
"甚至",
"用",
"由",
"由于",
"由此可见",
"的",
"的话",
"相对而言",
"省得",
"着",
"着呢",
"矣",
"离",
"第",
"等",
"等等",
"管",
"紧接着",
"纵",
"纵令",
"纵使",
"纵然",
"经",
"经过",
"结果",
"给",
"继而",
"综上所述",
"罢了",
"者",
"而",
"而且",
"而况",
"而外",
"而已",
"而是",
"而言",
"能",
"腾",
"自",
"自个儿",
"自从",
"自各儿",
"自家",
"自己",
"自身",
"至",
"至于",
"若",
"若是",
"若非",
"莫若",
"虽",
"虽则",
"虽然",
"虽说",
"被",
"要",
"要不",
"要不是",
"要不然",
"要么",
"要是",
"让",
"论",
"设使",
"设若",
"该",
"诸位",
"谁",
"谁知",
"赶",
"起",
"起见",
"趁",
"趁着",
"越是",
"跟",
"较",
"较之",
"边",
"过",
"还是",
"还有",
"这",
"这个",
"这么",
"这么些",
"这么样",
"这么点儿",
"这些",
"这会儿",
"这儿",
"这就是说",
"这时",
"这样",
"这边",
"这里",
"进而",
"连",
"连同",
"通过",
"遵照",
"那",
"那个",
"那么",
"那么些",
"那么样",
"那些",
"那会儿",
"那儿",
"那时",
"那样",
"那边",
"那里",
"鄙人",
"鉴于",
"阿",
"除",
"除了",
"除此之外",
"除非",
"随",
"随着",
"零",
"非但",
"非徒",
"靠",
"顺",
"顺着",
"首先",
"︿",
"!",
"#",
"$",
"%",
"&",
"(",
")",
"*",
"+",
",",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
":",
";",
"<",
">",
"?",
"@",
"[",
"]",
"{",
"|",
"}",
"~",
"¥",
],
}
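
A small usage sketch for a stopword table of this shape: filter whitespace-separated tokens against one language's list. The naive split-based tokenizer is an illustrative assumption, not part of the original module.

def remove_stopwords(text, lang="en"):
    """Drop tokens that appear in the chosen language's stopword list."""
    words = set(stopwords.get(lang, []))
    return [tok for tok in text.lower().split() if tok not in words]

print(remove_stopwords("these are the tokens we keep after filtering"))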
avg_line_length: 16.605705 | max_line_length: 77 | alphanum_fraction: 0.265198

hexsha: 2ba8c38efe0d3bdcad125b1c09123c9b5c7325e5 | size: 4,237 | ext: py | lang: Python
max_stars_repo_path: org/tradesafe/bt/strategy_cci.py | max_stars_repo_name: shenbai/tradesafe | max_stars_repo_head_hexsha: b6bb843288f535d7d146426fd40750f7484a16e6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: org/tradesafe/bt/strategy_cci.py | max_issues_repo_name: shenbai/tradesafe | max_issues_repo_head_hexsha: b6bb843288f535d7d146426fd40750f7484a16e6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: org/tradesafe/bt/strategy_cci.py | max_forks_repo_name: shenbai/tradesafe | max_forks_repo_head_hexsha: b6bb843288f535d7d146426fd40750f7484a16e6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-08-21T17:26:29.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-18T21:40:24.000Z
# coding:utf-8
from org.tradesafe.data.history_data import HistoryData
from org.tradesafe.bt.account import Account
from org.tradesafe.bt.strategy import AbstrictStrategy
from org.tradesafe.bt import strategy
from org.tradesafe.bt.order import Order
from org.tradesafe.bt.btdata import BtData
from org.tradesafe.bt.log import logging as logger
from datetime import datetime, timedelta
import traceback
import sys
from math import *
import talib
class StrategyCci(AbstrictStrategy):
'''
strategy
'''
def handle_tick(self, tick, data, row):
'''
Args:
            tick: the current trading day, formatted yyyy-mm-dd
            data: all data from the start date up to the current date
            row: the data row for the current day
Returns:
'''
yestoday = self.get_one_data(data, -1)
if row.cci < -100:
if not self.acount.buy_restriction_filter(data):
self.acount.buy(row.code, row.close, num=100000, date=tick)
if self.begin is None:
self.begin = tick
elif row.code in self.acount.positions and row.cci > 100:
if not self.acount.sell_restriction_filter(data):
self.acount.sell(row.code, row.close, num=100000, date=tick)
pass
if __name__ == '__main__':
# code = '600366'
start = '2016-01-01'
end = '2016-08-01'
X = []
hd = HistoryData()
# codes = hd.get_all_stock_code()
# data = hd.get_history_data_all(startDate=start, endDate=end)
# strategy.bench_mark(codes, data, start=start, end=end)
# sys.exit(0)
start = '2016-01-01'
end = '2016-08-01'
X = []
hd = HistoryData()
codes = hd.get_all_stock_code()
codes = ['600180', '000002']
a = StrategyCci(stock_pool=codes, start=start, end=end)
a.run()
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib
matplotlib.style.use('ggplot')
# print a.acount.cash, a.acount.get_market_value(), a.acount.get_assets(), a.acount.get_position_profit()
# for ph in a.acount.history_positions.get_history(code):
# print ph
# print '############################# history order #####################'
t_times = 0
for code in codes:
t = pd.DataFrame(index=a.btData.datas[code].index)
# print a.btData.datas[code].head()
# t['c'] = a.btData.datas[code].close
# t['ma5'] = a.btData.datas[code].ma5
# t['ma10'] = a.btData.datas[code].ma10
# t['beta'] = a.btData.datas[code].beta
# t['l_angle'] = a.btData.datas[code].l_angle
# t['l_intercept'] = a.btData.datas[code].l_intercept
# t['l_slope'] = a.btData.datas[code].l_slope
# t['sar'] = a.btData.datas[code].sar
bs = []
t['bs'] = 0
# print t['sar']
# print t.tail()
# plt.figure()
if a.acount.history_orders.get_history(code):
print len(a.acount.history_orders.get_history(code)), 'trade'
t_times = len(a.acount.history_orders.get_history(code))
for oh in a.acount.history_orders.get_history(code):
logger.info('order#%s' % oh)
if oh.date in t.index:
if 'buy' == oh.cmd:
# t.ix[oh.date]['bs'] = 5
t['bs'][oh.date] = 1
elif 'sell' == oh.cmd:
t['bs'][oh.date] = -1
# t.ix[oh.date]['bs']= oh.cmd
# t.ix['2015-08-07']['bs'] = 10
# print t.ix['2015-08-07']['bs']
# print '############################# history assets #####################'
# for x in a.acount.history_assets:
# print x
# print 'total_profit=', a.acount.history_orders.get_total_profit(code)
# print a.acount.cash, a.acount.get_market_value(), a.acount.get_assets(), a.acount.get_position_profit()
logger.info( '~ '+ code +'\t'+ str(a.baseline(sync=False)) +'\t'+ str((a.acount.get_assets() - a.acount.initial_cash)/a.acount.initial_cash))
# print t.describe()
# t.plot()
# plt.show()
X.append((a.baseline(sync=False), (a.acount.get_assets() - a.acount.initial_cash)/a.acount.initial_cash))
print X
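
As a compact, framework-free illustration of the CCI threshold rule that handle_tick applies (buy below -100, sell above +100), the snippet below computes CCI with talib on made-up price arrays; the sample data and the 14-bar period are assumptions for demonstration only.

import numpy as np
import talib

high = np.random.uniform(10.0, 12.0, 60)
low = high - np.random.uniform(0.1, 0.5, 60)
close = (high + low) / 2.0
cci = talib.CCI(high, low, close, timeperiod=14)

# 1 = buy signal (cci < -100), -1 = sell signal (cci > 100), 0 = hold
signals = np.where(cci < -100, 1, np.where(cci > 100, -1, 0))
print(signals[-10:])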
avg_line_length: 35.90678 | max_line_length: 153 | alphanum_fraction: 0.565494

hexsha: 50fc7d19a1b9488c02758f179f86a0843eb1c2c4 | size: 10,705 | ext: py | lang: Python
max_stars_repo_path: tests/unit/pillar/test_azureblob.py | max_stars_repo_name: markgras/salt | max_stars_repo_head_hexsha: d66cd3c935533c63870b83228b978ce43e0ef70d | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 9,425 | max_stars_repo_stars_event_min_datetime: 2015-01-01T05:59:24.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T20:44:05.000Z
max_issues_repo_path: tests/unit/pillar/test_azureblob.py | max_issues_repo_name: markgras/salt | max_issues_repo_head_hexsha: d66cd3c935533c63870b83228b978ce43e0ef70d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 33,507 | max_issues_repo_issues_event_min_datetime: 2015-01-01T00:19:56.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T23:48:20.000Z
max_forks_repo_path: tests/unit/pillar/test_azureblob.py | max_forks_repo_name: markgras/salt | max_forks_repo_head_hexsha: d66cd3c935533c63870b83228b978ce43e0ef70d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 5,810 | max_forks_repo_forks_event_min_datetime: 2015-01-01T19:11:45.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T02:37:20.000Z
"""
Tests for the Azure Blob External Pillar.
"""
import os
import pickle
import tempfile
import time
import salt.config
import salt.loader
import salt.pillar.azureblob as azureblob
import salt.utils.files
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
HAS_LIBS = False
try:
# pylint: disable=no-name-in-module
from azure.storage.blob import BlobServiceClient
# pylint: enable=no-name-in-module
HAS_LIBS = True
except ImportError:
pass
class MockBlob(dict):
"""
Creates a Mock Blob object.
"""
name = ""
def __init__(self):
super().__init__(
{
"container": None,
"name": "test.sls",
"prefix": None,
"delimiter": "/",
"results_per_page": None,
"location_mode": None,
}
)
class MockContainerClient:
"""
Creates a Mock ContainerClient.
"""
def __init__(self):
pass
def walk_blobs(self, *args, **kwargs):
yield MockBlob()
def get_blob_client(self, *args, **kwargs):
pass
class MockBlobServiceClient:
"""
Creates a Mock BlobServiceClient.
"""
def __init__(self):
pass
def get_container_client(self, *args, **kwargs):
container_client = MockContainerClient()
return container_client
@skipIf(HAS_LIBS is False, "The azure.storage.blob module must be installed.")
class AzureBlobTestCase(TestCase, LoaderModuleMockMixin):
"""
TestCase for salt.pillar.azureblob ext_pillar.
"""
def setup_loader_modules(self):
self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
utils = salt.loader.utils(self.opts)
return {
azureblob: {"__opts__": self.opts, "__utils__": utils},
}
def test__init_expired(self):
"""
Tests the result of _init when the cache is expired.
"""
container = "test"
multiple_env = False
environment = "base"
blob_cache_expire = 0 # The cache will be expired
blob_client = MockBlobServiceClient()
cache_file = tempfile.NamedTemporaryFile()
# Patches the _get_containers_cache_filename function so that it returns the name of the new tempfile that
# represents the cache file
with patch.object(
azureblob,
"_get_containers_cache_filename",
MagicMock(return_value=str(cache_file.name)),
):
# Patches the from_connection_string method of the BlobServiceClient class so that a connection string does
# not need to be given. Additionally it returns example blob data used by the ext_pillar.
with patch.object(
BlobServiceClient,
"from_connection_string",
MagicMock(return_value=blob_client),
):
ret = azureblob._init(
"", container, multiple_env, environment, blob_cache_expire
)
cache_file.close()
self.assertEqual(
ret,
{
"base": {
"test": [
{
"container": None,
"name": "test.sls",
"prefix": None,
"delimiter": "/",
"results_per_page": None,
"location_mode": None,
}
]
}
},
)
def test__init_not_expired(self):
"""
Tests the result of _init when the cache is not expired.
"""
container = "test"
multiple_env = False
environment = "base"
blob_cache_expire = (time.time()) * (
time.time()
) # The cache will not be expired
metadata = {
"base": {
"test": [
{"name": "base/secret.sls", "relevant": "include.sls"},
{"name": "blobtest.sls", "irrelevant": "ignore.sls"},
]
}
}
cache_file = tempfile.NamedTemporaryFile()
# Pickles the metadata and stores it in cache_file
with salt.utils.files.fopen(str(cache_file), "wb") as fp_:
pickle.dump(metadata, fp_)
# Patches the _get_containers_cache_filename function so that it returns the name of the new tempfile that
# represents the cache file
with patch.object(
azureblob,
"_get_containers_cache_filename",
MagicMock(return_value=str(cache_file.name)),
):
# Patches the _read_containers_cache_file function so that it returns what it normally would if the new
# tempfile representing the cache file was passed to it
plugged = azureblob._read_containers_cache_file(str(cache_file))
with patch.object(
azureblob,
"_read_containers_cache_file",
MagicMock(return_value=plugged),
):
ret = azureblob._init(
"", container, multiple_env, environment, blob_cache_expire
)
fp_.close()
os.remove(str(fp_.name))
cache_file.close()
self.assertEqual(ret, metadata)
def test__get_cache_dir(self):
"""
Tests the result of _get_cache_dir.
"""
ret = azureblob._get_cache_dir()
self.assertEqual(ret, "/var/cache/salt/master/pillar_azureblob")
def test__get_cached_file_name(self):
"""
Tests the result of _get_cached_file_name.
"""
container = "test"
saltenv = "base"
path = "base/secret.sls"
ret = azureblob._get_cached_file_name(container, saltenv, path)
self.assertEqual(
ret, "/var/cache/salt/master/pillar_azureblob/base/test/base/secret.sls"
)
def test__get_containers_cache_filename(self):
"""
Tests the result of _get_containers_cache_filename.
"""
container = "test"
ret = azureblob._get_containers_cache_filename(container)
self.assertEqual(
ret, "/var/cache/salt/master/pillar_azureblob/test-files.cache"
)
def test__refresh_containers_cache_file(self):
"""
Tests the result of _refresh_containers_cache_file to ensure that it successfully copies blob data into a
cache file.
"""
blob_client = MockBlobServiceClient()
container = "test"
cache_file = tempfile.NamedTemporaryFile()
with patch.object(
BlobServiceClient,
"from_connection_string",
MagicMock(return_value=blob_client),
):
ret = azureblob._refresh_containers_cache_file(
"", container, cache_file.name
)
cache_file.close()
self.assertEqual(
ret,
{
"base": {
"test": [
{
"container": None,
"name": "test.sls",
"prefix": None,
"delimiter": "/",
"results_per_page": None,
"location_mode": None,
}
]
}
},
)
def test__read_containers_cache_file(self):
"""
Tests the result of _read_containers_cache_file to make sure that it successfully loads in pickled metadata.
"""
metadata = {
"base": {
"test": [
{"name": "base/secret.sls", "relevant": "include.sls"},
{"name": "blobtest.sls", "irrelevant": "ignore.sls"},
]
}
}
cache_file = tempfile.NamedTemporaryFile()
# Pickles the metadata and stores it in cache_file
with salt.utils.files.fopen(str(cache_file), "wb") as fp_:
pickle.dump(metadata, fp_)
# Checks to see if _read_containers_cache_file can successfully read the pickled metadata from the cache file
ret = azureblob._read_containers_cache_file(str(cache_file))
fp_.close()
os.remove(str(fp_.name))
cache_file.close()
self.assertEqual(ret, metadata)
def test__find_files(self):
"""
Tests the result of _find_files. Ensures it only finds files and not directories, and that it also ignores
irrelevant files.
"""
metadata = {
"test": [
{"name": "base/secret.sls"},
{"name": "blobtest.sls", "irrelevant": "ignore.sls"},
{"name": "base/"},
]
}
ret = azureblob._find_files(metadata)
self.assertEqual(ret, {"test": ["base/secret.sls", "blobtest.sls"]})
def test__find_file_meta1(self):
"""
Tests the result of _find_file_meta when the metadata contains a blob with the specified path and a blob
without the specified path.
"""
metadata = {
"base": {
"test": [
{"name": "base/secret.sls", "relevant": "include.sls"},
{"name": "blobtest.sls", "irrelevant": "ignore.sls"},
]
}
}
container = "test"
saltenv = "base"
path = "base/secret.sls"
ret = azureblob._find_file_meta(metadata, container, saltenv, path)
self.assertEqual(ret, {"name": "base/secret.sls", "relevant": "include.sls"})
def test__find_file_meta2(self):
"""
Tests the result of _find_file_meta when the saltenv in metadata does not match the specified saltenv.
"""
metadata = {"wrong": {"test": [{"name": "base/secret.sls"}]}}
container = "test"
saltenv = "base"
path = "base/secret.sls"
ret = azureblob._find_file_meta(metadata, container, saltenv, path)
self.assertEqual(ret, None)
def test__find_file_meta3(self):
"""
Tests the result of _find_file_meta when the container in metadata does not match the specified container.
"""
metadata = {"base": {"wrong": [{"name": "base/secret.sls"}]}}
container = "test"
saltenv = "base"
path = "base/secret.sls"
ret = azureblob._find_file_meta(metadata, container, saltenv, path)
self.assertEqual(ret, None)
| 33.040123
| 119
| 0.55021
|
c11938b33bc7554057797d9b0e050750909ac597
| 2,110
|
py
|
Python
|
src/oci/oda/models/translator_collection.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/oda/models/translator_collection.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/oda/models/translator_collection.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TranslatorCollection(object):
"""
A collection of Translator summaries.
"""
def __init__(self, **kwargs):
"""
Initializes a new TranslatorCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this TranslatorCollection.
:type items: list[oci.oda.models.TranslatorSummary]
"""
self.swagger_types = {
'items': 'list[TranslatorSummary]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
"""
**[Required]** Gets the items of this TranslatorCollection.
The Translator summaries.
:return: The items of this TranslatorCollection.
:rtype: list[oci.oda.models.TranslatorSummary]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this TranslatorCollection.
The Translator summaries.
:param items: The items of this TranslatorCollection.
:type: list[oci.oda.models.TranslatorSummary]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
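# --- Hypothetical usage sketch (not part of the OCI SDK source) ---
# TranslatorCollection is normally returned by the ODA service client; the
# direct construction below only shows the kwargs-based initialization and the
# generated __repr__. The empty list stands in for TranslatorSummary objects.
if __name__ == "__main__":
    collection = TranslatorCollection(items=[])
    print(collection.items)  # -> []
    print(collection)        # formatted_flat_dict(...) representation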
| 29.71831
| 245
| 0.655924
|
8c5305c38c7fb51e8705ce48522338ee8080b949
| 3,863
|
py
|
Python
|
gpAux/extensions/gphdfs/regression/legacy/lib/rpm_util.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 9
|
2018-04-20T03:31:01.000Z
|
2020-05-13T14:10:53.000Z
|
gpAux/extensions/gphdfs/regression/legacy/lib/rpm_util.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 36
|
2017-09-21T09:12:27.000Z
|
2020-06-17T16:40:48.000Z
|
gpAux/extensions/gphdfs/regression/legacy/lib/rpm_util.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 32
|
2017-08-31T12:50:52.000Z
|
2022-03-01T07:34:53.000Z
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.lib import local_path, run_shell_command
class RPMUtil(object):
"""Utility module for dealing with RPM packages"""
def __init__(self):
self.rpm_cmd = "sudo rpm"
def query_package(self, pkg_name):
"""
Queries for rpm package.
@param pkg_name: RPM package name
@return: tuple containing status of installed package and list of
matching packages
"""
cmd_str = "%s -qa | egrep \"%s\"" %(self.rpm_cmd, pkg_name)
res = {}
result = run_shell_command(cmd_str,"Query packages",res)
list_pkgs = res['stdout'].replace('\n',',').split(',')[:-1]
return (result, list_pkgs)
def is_pkg_installed(self, pkg_name):
"""
Checks if the package is present or not.
@return: True or False based on the status
"""
return self.query_package(pkg_name)[0]
def install_package_using_yum(self, pkg_name, is_regex_pkg_name = False):
"""
Installs a given package using yum installer.
If the complete package name is not known, a regex pattern of the package name
can be given and is_regex_pkg_name passed as True.
@param pkg_name: name of the package to be installed.
@param is_regex_pkg_name: True if passing regex pattern for package name else False
@return: Boolean value based on installation status
"""
if is_regex_pkg_name:
pkg_name = pkg_name + "*"
cmd_str = "sudo yum -y install %s"%pkg_name
res = {}
result = run_shell_command(cmd_str, "Install package using yum", res)
return result
def install_rpms_from(self, rpm_pkgs_loc):
"""
Installs all the rpms from a given location
@param rpm_pkgs_loc: location where all packages reside
"""
cmd_str = "%s -ivh %s/*.rpm" %(self.rpm_cmd, rpm_pkgs_loc)
res = {}
packages_installed = run_shell_command(cmd_str, "Install RPMs from loc - %s" %rpm_pkgs_loc, res)
if not packages_installed:
tinctest.logger.error("Failed to install rpms from %s - Error: %s" %(rpm_pkgs_loc, res['stderr']))
raise Exception("Failed to install rpms from %s - Error: %s" %(rpm_pkgs_loc, res['stderr']))
def erase_package(self, pkg_name):
"""
Erases a given rpm package
@param pkg_name: name of the package to be deleted
@return: deletion status
"""
cmd_str = "%s -e %s" %(self.rpm_cmd, pkg_name)
result = run_shell_command(cmd_str,"Erase packages")
return result
def erase_all_packages(self, pkg_name_regex):
"""
Erases more than 1 package based on the package name regex
@param pkg_name_regex: regex pattern of the packages to be deleted
@return: deletion status
"""
cmd_str = "%s -qa | egrep \"%s\" | xargs %s -e" %(self.rpm_cmd, pkg_name_regex, self.rpm_cmd)
result = run_shell_command(cmd_str,"Erase All packages matching regex pattern")
return result
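# --- Illustrative usage sketch (not part of the original module) ---
# The package name below is an example; any installed RPM name or regex works.
if __name__ == "__main__":
    util = RPMUtil()
    found, packages = util.query_package("openssl")
    if found:
        print("matching packages: %s" % packages)
    else:
        print("no package matching 'openssl' is installed")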
| 38.247525
| 110
| 0.637329
|
12673e498880a63406c38f97aebd98a8bd6b68b8
| 4,588
|
py
|
Python
|
datasets/cifar10.py
|
baiyang1220/tensorflow1.x-basic
|
f35801a92a2a1b0e610b4eba0b292df215fe8e7e
|
[
"MIT"
] | null | null | null |
datasets/cifar10.py
|
baiyang1220/tensorflow1.x-basic
|
f35801a92a2a1b0e610b4eba0b292df215fe8e7e
|
[
"MIT"
] | null | null | null |
datasets/cifar10.py
|
baiyang1220/tensorflow1.x-basic
|
f35801a92a2a1b0e610b4eba0b292df215fe8e7e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import pickle
import numpy as np
from datasets.data_utils import get_filenames_from_urls
from datasets.data_utils import download
from datasets.data_utils import extract_tar
URLS = {
'data': r'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
}
DECOMPRESSED_FOLDER = r'cifar-10-batches-py'
DECOMPRESSED_FILENAMES = {
'train': ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5'],
'test': ['test_batch'],
}
def load(folder=None):
"""
Load CIFAR10 data from folder. If the data does NOT exist then download and
decompress it.
Parameters
----------
folder: Folder where to store CIFAR10 data and labels
Returns
-------
data: Parsed CIFAR10 data and labels in dict type
data = {
'train_data': (50000, 32, 32, 3) numpy array,
'train_labels': (50000,) numpy array,
'test_data': (10000, 32, 32, 3) numpy array,
'test_labels': (10000,) numpy array,
}
"""
if folder is None:
folder = '.'
original_folder = folder
parent_folder = os.path.split(original_folder)[0]
sub_folder = os.path.join(original_folder, DECOMPRESSED_FOLDER)
# check existence and completeness of data, download or/and decompress the
# data if needed
filenames = get_filenames_from_urls(URLS)
downloaded_filename = filenames.get('data')
if exist_data(original_folder):
folder = original_folder
elif exist_data(sub_folder):
folder = sub_folder
elif extract_data(os.path.join(original_folder, downloaded_filename)):
folder = sub_folder
elif extract_data(os.path.join(parent_folder, downloaded_filename)):
folder = original_folder
else:
download(URLS.get('data'), original_folder, downloaded_filename)
extract_tar(os.path.join(original_folder, downloaded_filename))
folder = sub_folder
# parse data
data = {
'train_data': [],
'train_labels': [],
'test_data': [],
'test_labels': [],
}
for filename in DECOMPRESSED_FILENAMES.get('train'):
path = os.path.join(folder, filename)
temp_data, temp_labels = parse_one_data_file(path)
data['train_data'].append(temp_data)
data['train_labels'].append((temp_labels))
for filename in DECOMPRESSED_FILENAMES.get('test'):
path = os.path.join(folder, filename)
temp_data, temp_labels = parse_one_data_file(path)
data['test_data'].append(temp_data)
data['test_labels'].append(temp_labels)
# concatenate data
data['train_data'] = np.concatenate(data.get('train_data'), axis=0)
data['train_labels'] = np.concatenate(data.get('train_labels'), axis=0)
data['test_data'] = np.concatenate(data.get('test_data'), axis=0)
data['test_labels'] = np.concatenate(data.get('test_labels'), axis=0)
return data
def unpickle(filename):
"""
Parse CIFAR10 data.
Return a dict containing {data, filenames, labels, batch_label}
"""
with open(filename, 'rb') as fo:
data = pickle.load(fo, encoding='bytes')
return data
def exist_data(folder):
"""
Check existence and completeness of decompressed data in folder
Parameters
----------
folder: Folder where to store the decompressed CIFAR10 data
"""
for name in DECOMPRESSED_FILENAMES.keys():
filename_list = DECOMPRESSED_FILENAMES.get(name)
for filename in filename_list:
full_name = os.path.join(folder, filename)
if not os.path.exists(full_name):
return False
return True
def extract_data(path):
"""
Extract data if path exists. Return True if successfully extracted else
False
Parameters
----------
path: Path of downloaded .tar.gz file
"""
if os.path.exists(path):
extract_tar(path)
return True
return False
def parse_one_data_file(path):
"""
Parse one data file to obtain data and labels
Parameters
----------
path: Path of data file, such as 'xxxx/data_batch_1'
"""
data_dict = unpickle(path)
# keys in data_dict are 'bytes' rather than 'str', so a b'' prefix is
# required on the key in order to get the value
data = data_dict.get(b'data')
labels = data_dict.get(b'labels')
# default cifar-10 data encoding is channel first
data = np.reshape(data, [-1, 3, 32, 32])
# transpose to channel last
data = np.transpose(data, [0, 2, 3, 1])
return data, labels
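# --- Illustrative usage sketch (not part of the original module) ---
# Downloads/extracts CIFAR-10 into ./cifar10_data on first use (the folder name
# is arbitrary), then prints the array shapes documented in load()'s docstring.
if __name__ == "__main__":
    data = load(folder="cifar10_data")
    print("train_data:", data["train_data"].shape)      # (50000, 32, 32, 3)
    print("train_labels:", data["train_labels"].shape)  # (50000,)
    print("test_data:", data["test_data"].shape)        # (10000, 32, 32, 3)
    print("test_labels:", data["test_labels"].shape)    # (10000,)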
| 29.792208
| 79
| 0.646905
|
2dab4443adcb473402e2a8081b3a8ad2f4a28520
| 2,696
|
py
|
Python
|
awx/main/tests/unit/commands/test_replay_job_events.py
|
sumit-21/awx
|
966a62c6bf2ec0c672e076684341bc6bd75827af
|
[
"Apache-2.0"
] | 17
|
2021-04-03T01:40:17.000Z
|
2022-03-03T11:45:20.000Z
|
awx/main/tests/unit/commands/test_replay_job_events.py
|
Saurabh-Thakre/awx
|
8eb377a3ea8303c394ad4c958cc828c7239c1e11
|
[
"Apache-2.0"
] | 24
|
2021-05-18T21:13:35.000Z
|
2022-03-29T10:23:52.000Z
|
awx/main/tests/unit/commands/test_replay_job_events.py
|
Saurabh-Thakre/awx
|
8eb377a3ea8303c394ad4c958cc828c7239c1e11
|
[
"Apache-2.0"
] | 24
|
2020-11-27T08:37:35.000Z
|
2021-03-08T13:27:15.000Z
|
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved
# Python
import pytest
from unittest import mock
from datetime import timedelta
# Django
from django.utils import timezone
# AWX
from awx.main.models import (
Job,
JobEvent,
)
from awx.main.management.commands.replay_job_events import (
ReplayJobEvents,
)
class TestReplayJobEvents():
@pytest.fixture
def epoch(self):
return timezone.now()
@pytest.fixture
def job_events(self, epoch):
return [
JobEvent(created=epoch),
JobEvent(created=epoch + timedelta(seconds=10)),
JobEvent(created=epoch + timedelta(seconds=20)),
JobEvent(created=epoch + timedelta(seconds=30)),
JobEvent(created=epoch + timedelta(seconds=31)),
JobEvent(created=epoch + timedelta(seconds=31, milliseconds=1)),
JobEvent(created=epoch + timedelta(seconds=31, microseconds=1, milliseconds=1)),
]
@pytest.fixture
def mock_serializer_fn(self):
class MockSerializer():
data = dict()
def fn(job_event):
serialized = MockSerializer()
serialized.data['group_name'] = 'foobar'
return serialized
return fn
@pytest.fixture
def replayer(self, mocker, job_events, mock_serializer_fn):
r = ReplayJobEvents()
r.get_serializer = lambda self: mock_serializer_fn
r.get_job = mocker.MagicMock(return_value=Job(id=3))
r.sleep = mocker.MagicMock()
r.get_job_events = lambda self: (job_events, len(job_events))
r.replay_offset = lambda *args, **kwarg: 0
r.emit_job_status = lambda job, status: True
return r
@mock.patch('awx.main.management.commands.replay_job_events.emit_event_detail', lambda *a, **kw: None)
def test_sleep(self, mocker, replayer):
replayer.run(3, 1)
assert replayer.sleep.call_count == 6
replayer.sleep.assert_has_calls([
mock.call(10.0),
mock.call(10.0),
mock.call(10.0),
mock.call(1.0),
mock.call(0.001),
mock.call(0.000001),
])
@mock.patch('awx.main.management.commands.replay_job_events.emit_event_detail', lambda *a, **kw: None)
def test_speed(self, mocker, replayer):
replayer.run(3, 2)
assert replayer.sleep.call_count == 6
replayer.sleep.assert_has_calls([
mock.call(5.0),
mock.call(5.0),
mock.call(5.0),
mock.call(0.5),
mock.call(0.0005),
mock.call(0.0000005),
])
# TODO: Test replay_offset()
# TODO: Test stat generation
| 28.989247
| 106
| 0.614243
|
2705758d04d067af871a7e87635708258f71b3f9
| 1,900
|
py
|
Python
|
agents.py
|
liuzhangyi/2048-api
|
475bd331c5e4ac1f9f26dcf532c4fa1decdf7345
|
[
"Apache-2.0"
] | 1
|
2018-12-11T06:38:43.000Z
|
2018-12-11T06:38:43.000Z
|
agents.py
|
liuzhangyi/2048-api
|
475bd331c5e4ac1f9f26dcf532c4fa1decdf7345
|
[
"Apache-2.0"
] | null | null | null |
agents.py
|
liuzhangyi/2048-api
|
475bd331c5e4ac1f9f26dcf532c4fa1decdf7345
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import keras
from keras.models import Model
OUT_SHAPE=(4,4)
map_table={2**i:i for i in range(1,16)}
map_table[0]=0
def grid_ohe(arr):
ret=np.zeros(shape=OUT_SHAPE+(16,),dtype=bool)
for r in range(OUT_SHAPE[0]):
for c in range(OUT_SHAPE[1]):
ret[r,c,map_table[arr[r,c]]]=1
return ret
model=keras.models.load_model('2048.model')
class Agent:
'''Agent Base.'''
def __init__(self, game, display=None):
self.game = game
self.display = display
def play(self, max_iter=np.inf, verbose=False):
n_iter = 0
while (n_iter < max_iter) and (not self.game.end):
direction = self.step()
self.game.move(direction)
n_iter += 1
if verbose:
print("Iter: {}".format(n_iter))
print("======Direction: {}======".format(
["left", "down", "right", "up"][direction]))
if self.display is not None:
self.display.display(self.game)
def step(self):
direction = int(input("0: left, 1: down, 2: right, 3: up = ")) % 4
return direction
class RandomAgent(Agent):
def step(self):
direction = np.random.randint(0, 4)
return direction
class myAgent(Agent):
def step(self):
board=grid_ohe(self.game.board)
direction = model.predict(np.expand_dims(board,axis=0)).argmax()
return direction
class ExpectiMaxAgent(Agent):
def __init__(self, game, display=None):
if game.size != 4:
raise ValueError(
"`%s` can only work with game of `size` 4." % self.__class__.__name__)
super().__init__(game, display)
from .expectimax import board_to_move
self.search_func = board_to_move
def step(self):
direction = self.search_func(self.game.board)
return direction
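# --- Illustrative sketch (not part of the original module) ---
# grid_ohe() one-hot encodes a 4x4 board into shape (4, 4, 16) using
# map_table (tile value 2**i -> channel i, empty cell -> channel 0).
# The board below is an arbitrary example.
def _demo_grid_ohe():
    board = np.array([[0, 2, 4, 8],
                      [16, 32, 64, 128],
                      [256, 512, 1024, 2048],
                      [0, 0, 2, 4]])
    encoded = grid_ohe(board)
    assert encoded.shape == (4, 4, 16)
    assert encoded[0, 1, map_table[2]]  # the "2" tile sets channel 1
    return encoded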
| 27.941176
| 86
| 0.585263
|
19d74f542e2ddaf9527f7a4851760c79245f02cc
| 6,376
|
py
|
Python
|
sktime/transformers/series_to_series.py
|
TonyBagnall/sktime
|
837a77026be3e53511c3d6139ddad14a39351bf5
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T13:59:21.000Z
|
2020-03-02T20:32:31.000Z
|
sktime/transformers/series_to_series.py
|
TonyBagnall/boss_fork
|
837a77026be3e53511c3d6139ddad14a39351bf5
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/transformers/series_to_series.py
|
TonyBagnall/boss_fork
|
837a77026be3e53511c3d6139ddad14a39351bf5
|
[
"BSD-3-Clause"
] | 2
|
2019-08-24T12:06:15.000Z
|
2020-01-09T07:32:40.000Z
|
from sklearn.utils.validation import check_is_fitted
import numpy as np
import pandas as pd
from ..utils.validation import check_equal_index
from ..utils.transformations import tabularize, concat_nested_arrays
from ..utils.time_series import rand_intervals_rand_n, rand_intervals_fixed_n
from .base import BaseTransformer
__all__ = ['RandomIntervalSegmenter']
class RandomIntervalSegmenter(BaseTransformer):
"""Transformer that segments time-series into random intervals.
Parameters
----------
n_intervals : str or int
Number of intervals to generate.
- If "sqrt", sqrt of length of time-series is used.
- If "random", random number of intervals is generated.
- If int, n_intervals intervals are generated.
Default is "sqrt".
min_length : int, optional (default=None)
Minimum length of the generated intervals; if None, a minimum length of 1 is used.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
check_input : bool, optional (default=True)
When set to ``True``, inputs will be validated, otherwise inputs are assumed to be valid
and no checks are performed. Use with caution.
"""
def __init__(self, n_intervals='sqrt', min_length=None, random_state=None, check_input=True):
self.input_indexes_ = [] # list of time-series indexes of each column
self.random_state = random_state
self.check_input = check_input
self.intervals_ = []
self.input_shape_ = ()
self.n_intervals = n_intervals
self.columns_ = []
if min_length is None:
self.min_length = 1
else:
self.min_length = min_length
if n_intervals in ('sqrt', 'random'):
self.n_intervals = n_intervals
elif np.issubdtype(type(n_intervals), np.integer):
if n_intervals <= 0:
raise ValueError('Number of intervals must be positive')
self.n_intervals = n_intervals
else:
raise ValueError(f'Number of intervals must be either "random", "sqrt" or positive integer, '
f'but found {type(n_intervals)}')
def fit(self, X, y=None):
"""Fit transformer, generating random interval indices.
Parameters
----------
X : pandas DataFrame of shape [n_samples, n_features]
Input data
y : pandas Series, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
self : RandomIntervalSegmenter
This estimator
"""
if self.check_input:
# TODO check input is series column, not column of primitives
pass
# Cast into 2d dataframe
if X.ndim == 1:
X = pd.DataFrame(X)
self.input_shape_ = X.shape
# Retrieve time-series indexes from each column.
# TODO generalise to columns with series of unequal length
self.input_indexes_ = [X.iloc[0, c].index if hasattr(X.iloc[0, c], 'index')
else np.arange(X.iloc[0, c].shape[0]) for c in range(self.input_shape_[1])]
# Compute random intervals for each column.
# TODO if multiple columns are passed, introduce option to compute one set of shared intervals,
# or use ColumnTransformer?
if self.n_intervals == 'random':
self.intervals_ = [rand_intervals_rand_n(self.input_indexes_[c],
random_state=self.random_state)
for c in range(self.input_shape_[1])]
else:
self.intervals_ = [rand_intervals_fixed_n(self.input_indexes_[c],
n=self.n_intervals,
min_length=self.min_length,
random_state=self.random_state)
for c in range(self.input_shape_[1])]
return self
def transform(self, X, y=None):
Transform X, segmenting time-series in each column into random intervals using the interval indices generated
during `fit`.
Parameters
----------
X : nested pandas DataFrame of shape [n_samples, n_features]
Nested dataframe with time-series in cells.
Returns
-------
Xt : pandas DataFrame
Transformed pandas DataFrame with same number of rows and one column for each generated interval.
"""
# Check if fit has been called
check_is_fitted(self, 'intervals_')
# Cast into 2d dataframe
if X.ndim == 1:
X = pd.DataFrame(X)
# Check inputs.
if self.check_input:
# Check that the input is of the same shape as the one passed
# during fit.
if (X.shape[1] if X.ndim == 2 else 1) != self.input_shape_[1]:
raise ValueError('Number of columns of input is different from what was seen'
' in `fit`')
# Input validation
if not all([np.array_equal(fit_idx, trans_idx) for trans_idx, fit_idx in zip(check_equal_index(X),
self.input_indexes_)]):
raise ValueError('Indexes of input time-series are different from what was seen in `fit`')
# Segment into intervals.
intervals = []
self.columns_ = []
for c, (colname, col) in enumerate(X.items()):
# Tabularize each column assuming series have equal indexes in any given column.
# TODO generalise to non-equal-index cases
arr = tabularize(col, return_array=True)
for start, end in self.intervals_[c]:
interval = arr[:, start:end]
intervals.append(interval)
self.columns_.append(f'{colname}_{start}_{end}')
# Return nested pandas Series or DataFrame.
Xt = pd.DataFrame(concat_nested_arrays(intervals, return_arrays=True))
Xt.columns = self.columns_
return Xt
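# --- Illustrative usage sketch (not part of the original module) ---
# Builds a small nested DataFrame (one column, equal-length series in each
# cell) and segments it into 3 random intervals, assuming the sibling utils
# modules behave as used by fit()/transform() above. Names are illustrative.
def _demo_random_interval_segmenter(n_samples=5, series_length=20):
    rng = np.random.RandomState(42)
    X = pd.DataFrame({
        'dim_0': [pd.Series(rng.randn(series_length)) for _ in range(n_samples)]
    })
    segmenter = RandomIntervalSegmenter(n_intervals=3, random_state=42)
    Xt = segmenter.fit(X).transform(X)
    # One output column per generated interval, named '<col>_<start>_<end>'.
    return Xt.columns.tolist()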
| 40.871795
| 114
| 0.592221
|
4687f3f3ede65252208b5d368d3d46bc0032366e
| 28
|
py
|
Python
|
cgen/hsm/__init__.py
|
jszeman/chsm
|
0283d722553aea09477e039a718b9500d814f0ae
|
[
"MIT"
] | 4
|
2021-04-16T18:28:00.000Z
|
2022-03-01T07:29:21.000Z
|
cgen/hsm/__init__.py
|
xsession/chsm
|
1c502897c8fce56bb48ad49de6d642f2bb66ff95
|
[
"MIT"
] | 6
|
2020-10-25T19:56:59.000Z
|
2022-03-24T05:00:10.000Z
|
cgen/hsm/__init__.py
|
xsession/chsm
|
1c502897c8fce56bb48ad49de6d642f2bb66ff95
|
[
"MIT"
] | 1
|
2021-05-27T07:01:28.000Z
|
2021-05-27T07:01:28.000Z
|
from .sm import StateMachine
| 28
| 28
| 0.857143
|
304a88acdc435541fe9055a0d50f8fa00ba1c3a3
| 651
|
py
|
Python
|
Regular_Expressions_for_NW_Auto/re_Basics/re_Basics_3.py
|
yasser296/Python-Projects
|
eae3598e2d4faf08d9def92c8b417c2e7946c5f4
|
[
"MIT"
] | null | null | null |
Regular_Expressions_for_NW_Auto/re_Basics/re_Basics_3.py
|
yasser296/Python-Projects
|
eae3598e2d4faf08d9def92c8b417c2e7946c5f4
|
[
"MIT"
] | null | null | null |
Regular_Expressions_for_NW_Auto/re_Basics/re_Basics_3.py
|
yasser296/Python-Projects
|
eae3598e2d4faf08d9def92c8b417c2e7946c5f4
|
[
"MIT"
] | null | null | null |
import re
print( re.match('[a-z]+' , "configuration register is 0x2102").group() )
print( re.match('[a-z]+' , "configuration register is 0x2102").start() )
print( re.match('[a-z]+' , "configuration register is 0x2102").end() )
print( re.match('[a-z]+' , "configuration register is 0x2102").span() )
print( re.match('[a-z]+' , "configuration register is 0x2102").group ) # prints the bound method object; its repr shows the memory location
print( re.match('[a-z]+' , "configuration register is 0x2102").start )
print( re.match('[a-z]+' , "configuration register is 0x2102").end )
print( re.match('[a-z]+' , "configuration register is 0x2102").span )
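# Expected output of the calls above (the match is "configuration"):
#   group() -> 'configuration'
#   start() -> 0
#   end()   -> 13
#   span()  -> (0, 13)
# The last four prints show the bound method objects themselves (repr with a
# memory address), because the methods are referenced without being called.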
| 29.590909
| 108
| 0.631336
|
9e5eb933138b79d3fff9fb48e06815260a9c1d04
| 446
|
py
|
Python
|
tests/test_run.py
|
prophile/bong
|
568205c5977415003f740218aaf6a4ccff28c33e
|
[
"MIT"
] | null | null | null |
tests/test_run.py
|
prophile/bong
|
568205c5977415003f740218aaf6a4ccff28c33e
|
[
"MIT"
] | null | null | null |
tests/test_run.py
|
prophile/bong
|
568205c5977415003f740218aaf6a4ccff28c33e
|
[
"MIT"
] | 1
|
2018-03-05T17:16:50.000Z
|
2018-03-05T17:16:50.000Z
|
from bong.settings import BongSettings
from bong.run import run
from unittest.mock import Mock
def test_run_length():
notify = Mock()
sleep = Mock()
run(BongSettings(time=40, message='bees'), sleep=sleep, notify=notify)
sleep.assert_called_with(40)
def test_notification():
notify = Mock()
sleep = Mock()
run(BongSettings(time=40, message='bees'), sleep=sleep, notify=notify)
notify.assert_called_with('bees')
| 24.777778
| 74
| 0.710762
|
5631ec06cf1ab48489380ffc2bb415017053166f
| 33,274
|
py
|
Python
|
homeassistant/components/media_player/bluesound.py
|
sara0871/laughing--barnacle-
|
70412fc0ba42ccfe446c0c62e327eceeda56a2ab
|
[
"Apache-2.0"
] | 2
|
2020-12-06T23:15:21.000Z
|
2021-03-20T20:21:03.000Z
|
homeassistant/components/media_player/bluesound.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:06:43.000Z
|
2022-03-12T00:56:04.000Z
|
homeassistant/components/media_player/bluesound.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 1
|
2019-04-26T12:59:54.000Z
|
2019-04-26T12:59:54.000Z
|
"""
Support for Bluesound devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.bluesound/
"""
import asyncio
from asyncio.futures import CancelledError
from datetime import timedelta
import logging
import aiohttp
from aiohttp.client_exceptions import ClientError
from aiohttp.hdrs import CONNECTION, KEEP_ALIVE
import async_timeout
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, PLATFORM_SCHEMA,
SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_STOP,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP,
MediaPlayerDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_HOSTS, CONF_NAME, CONF_PORT,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, STATE_IDLE,
STATE_OFF, STATE_PAUSED, STATE_PLAYING)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['xmltodict==0.11.0']
_LOGGER = logging.getLogger(__name__)
ATTR_MASTER = 'master'
DATA_BLUESOUND = 'bluesound'
DEFAULT_PORT = 11000
NODE_OFFLINE_CHECK_TIMEOUT = 180
NODE_RETRY_INITIATION = timedelta(minutes=3)
SERVICE_CLEAR_TIMER = 'bluesound_clear_sleep_timer'
SERVICE_JOIN = 'bluesound_join'
SERVICE_SET_TIMER = 'bluesound_set_sleep_timer'
SERVICE_UNJOIN = 'bluesound_unjoin'
STATE_GROUPED = 'grouped'
SYNC_STATUS_INTERVAL = timedelta(minutes=5)
UPDATE_CAPTURE_INTERVAL = timedelta(minutes=30)
UPDATE_PRESETS_INTERVAL = timedelta(minutes=30)
UPDATE_SERVICES_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOSTS): vol.All(cv.ensure_list, [{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}])
})
BS_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
BS_JOIN_SCHEMA = BS_SCHEMA.extend({
vol.Required(ATTR_MASTER): cv.entity_id,
})
SERVICE_TO_METHOD = {
SERVICE_JOIN: {
'method': 'async_join',
'schema': BS_JOIN_SCHEMA},
SERVICE_UNJOIN: {
'method': 'async_unjoin',
'schema': BS_SCHEMA},
SERVICE_SET_TIMER: {
'method': 'async_increase_timer',
'schema': BS_SCHEMA},
SERVICE_CLEAR_TIMER: {
'method': 'async_clear_timer',
'schema': BS_SCHEMA}
}
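# Example service call (entity ids are illustrative) for grouping players,
# using the bluesound_join service registered under the media_player domain
# below:
#
#   service: media_player.bluesound_join
#   data:
#     master: media_player.bluesound_living_room
#     entity_id: media_player.bluesound_kitchen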
def _add_player(hass, async_add_devices, host, port=None, name=None):
"""Add Bluesound players."""
if host in [x.host for x in hass.data[DATA_BLUESOUND]]:
return
@callback
def _init_player(event=None):
"""Start polling."""
hass.async_add_job(player.async_init())
@callback
def _start_polling(event=None):
"""Start polling."""
player.start_polling()
@callback
def _stop_polling():
"""Stop polling."""
player.stop_polling()
@callback
def _add_player_cb():
"""Add player after first sync fetch."""
async_add_devices([player])
_LOGGER.info("Added device with name: %s", player.name)
if hass.is_running:
_start_polling()
else:
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, _start_polling)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_polling)
player = BluesoundPlayer(hass, host, port, name, _add_player_cb)
hass.data[DATA_BLUESOUND].append(player)
if hass.is_running:
_init_player()
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _init_player)
async def async_setup_platform(
hass, config, async_add_devices, discovery_info=None):
"""Set up the Bluesound platforms."""
if DATA_BLUESOUND not in hass.data:
hass.data[DATA_BLUESOUND] = []
if discovery_info:
_add_player(hass, async_add_devices, discovery_info.get(CONF_HOST),
discovery_info.get(CONF_PORT, None))
return
hosts = config.get(CONF_HOSTS, None)
if hosts:
for host in hosts:
_add_player(
hass, async_add_devices, host.get(CONF_HOST),
host.get(CONF_PORT), host.get(CONF_NAME))
async def async_service_handler(service):
"""Map services to method of Bluesound devices."""
method = SERVICE_TO_METHOD.get(service.service)
if not method:
return
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_players = [player for player in hass.data[DATA_BLUESOUND]
if player.entity_id in entity_ids]
else:
target_players = hass.data[DATA_BLUESOUND]
for player in target_players:
await getattr(player, method['method'])(**params)
for service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service]['schema']
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=schema)
class BluesoundPlayer(MediaPlayerDevice):
"""Representation of a Bluesound Player."""
def __init__(self, hass, host, port=None, name=None, init_callback=None):
"""Initialize the media player."""
self.host = host
self._hass = hass
self.port = port
self._polling_session = async_get_clientsession(hass)
self._polling_task = None # The actual polling task.
self._name = name
self._icon = None
self._capture_items = []
self._services_items = []
self._preset_items = []
self._sync_status = {}
self._status = None
self._last_status_update = None
self._is_online = False
self._retry_remove = None
self._lastvol = None
self._master = None
self._is_master = False
self._group_name = None
self._init_callback = init_callback
if self.port is None:
self.port = DEFAULT_PORT
class _TimeoutException(Exception):
pass
@staticmethod
def _try_get_index(string, search_string):
"""Get the index."""
try:
return string.index(search_string)
except ValueError:
return -1
async def force_update_sync_status(
self, on_updated_cb=None, raise_timeout=False):
"""Update the internal status."""
resp = await self.send_bluesound_command(
'SyncStatus', raise_timeout, raise_timeout)
if not resp:
return None
self._sync_status = resp['SyncStatus'].copy()
if not self._name:
self._name = self._sync_status.get('@name', self.host)
if not self._icon:
self._icon = self._sync_status.get('@icon', self.host)
master = self._sync_status.get('master', None)
if master is not None:
self._is_master = False
master_host = master.get('#text')
master_device = [device for device in
self._hass.data[DATA_BLUESOUND]
if device.host == master_host]
if master_device and master_host != self.host:
self._master = master_device[0]
else:
self._master = None
_LOGGER.error("Master not found %s", master_host)
else:
if self._master is not None:
self._master = None
slaves = self._sync_status.get('slave', None)
self._is_master = slaves is not None
if on_updated_cb:
on_updated_cb()
return True
async def _start_poll_command(self):
"""Loop which polls the status of the player."""
try:
while True:
await self.async_update_status()
except (asyncio.TimeoutError, ClientError,
BluesoundPlayer._TimeoutException):
_LOGGER.info("Node %s is offline, retrying later", self._name)
await asyncio.sleep(
NODE_OFFLINE_CHECK_TIMEOUT, loop=self._hass.loop)
self.start_polling()
except CancelledError:
_LOGGER.debug("Stopping the polling of node %s", self._name)
except Exception:
_LOGGER.exception("Unexpected error in %s", self._name)
raise
def start_polling(self):
"""Start the polling task."""
self._polling_task = self._hass.async_add_job(
self._start_poll_command())
def stop_polling(self):
"""Stop the polling task."""
self._polling_task.cancel()
async def async_init(self, triggered=None):
"""Initialize the player async."""
try:
if self._retry_remove is not None:
self._retry_remove()
self._retry_remove = None
await self.force_update_sync_status(
self._init_callback, True)
except (asyncio.TimeoutError, ClientError):
_LOGGER.info("Node %s is offline, retrying later", self.host)
self._retry_remove = async_track_time_interval(
self._hass, self.async_init, NODE_RETRY_INITIATION)
except Exception:
_LOGGER.exception(
"Unexpected when initiating error in %s", self.host)
raise
async def async_update(self):
"""Update internal status of the entity."""
if not self._is_online:
return
await self.async_update_sync_status()
await self.async_update_presets()
await self.async_update_captures()
await self.async_update_services()
async def send_bluesound_command(
self, method, raise_timeout=False, allow_offline=False):
"""Send command to the player."""
import xmltodict
if not self._is_online and not allow_offline:
return
if method[0] == '/':
method = method[1:]
url = "http://{}:{}/{}".format(self.host, self.port, method)
_LOGGER.debug("Calling URL: %s", url)
response = None
try:
websession = async_get_clientsession(self._hass)
with async_timeout.timeout(10, loop=self._hass.loop):
response = await websession.get(url)
if response.status == 200:
result = await response.text()
if result:
data = xmltodict.parse(result)
else:
data = None
elif response.status == 595:
_LOGGER.info("Status 595 returned, treating as timeout")
raise BluesoundPlayer._TimeoutException()
else:
_LOGGER.error("Error %s on %s", response.status, url)
return None
except (asyncio.TimeoutError, aiohttp.ClientError):
if raise_timeout:
_LOGGER.info("Timeout: %s", self.host)
raise
else:
_LOGGER.debug("Failed communicating: %s", self.host)
return None
return data
async def async_update_status(self):
"""Use the poll session to always get the status of the player."""
import xmltodict
response = None
url = 'Status'
etag = ''
if self._status is not None:
etag = self._status.get('@etag', '')
if etag != '':
url = 'Status?etag={}&timeout=120.0'.format(etag)
url = "http://{}:{}/{}".format(self.host, self.port, url)
_LOGGER.debug("Calling URL: %s", url)
try:
with async_timeout.timeout(125, loop=self._hass.loop):
response = await self._polling_session.get(
url, headers={CONNECTION: KEEP_ALIVE})
if response.status == 200:
result = await response.text()
self._is_online = True
self._last_status_update = dt_util.utcnow()
self._status = xmltodict.parse(result)['status'].copy()
group_name = self._status.get('groupName', None)
if group_name != self._group_name:
_LOGGER.debug(
"Group name change detected on device: %s", self.host)
self._group_name = group_name
# the sleep is needed to make sure that the
# devices is synced
await asyncio.sleep(1, loop=self._hass.loop)
await self.async_trigger_sync_on_all()
elif self.is_grouped:
# When the player is grouped we need to fetch the volume from
# sync_status, so we force an update while grouped. This isn't a
# foolproof solution; a better one would be to fetch sync_status
# more often while the device is playing, which would solve a lot
# of problems. That change will be made when the communication is
# moved to a separate library.
await self.force_update_sync_status()
self.async_schedule_update_ha_state()
elif response.status == 595:
_LOGGER.info("Status 595 returned, treating as timeout")
raise BluesoundPlayer._TimeoutException()
else:
_LOGGER.error("Error %s on %s. Trying one more time",
response.status, url)
except (asyncio.TimeoutError, ClientError):
self._is_online = False
self._last_status_update = None
self._status = None
self.async_schedule_update_ha_state()
_LOGGER.info(
"Client connection error, marking %s as offline", self._name)
raise
async def async_trigger_sync_on_all(self):
"""Trigger sync status update on all devices."""
_LOGGER.debug("Trigger sync status on all devices")
for player in self._hass.data[DATA_BLUESOUND]:
await player.force_update_sync_status()
@Throttle(SYNC_STATUS_INTERVAL)
async def async_update_sync_status(
self, on_updated_cb=None, raise_timeout=False):
"""Update sync status."""
await self.force_update_sync_status(
on_updated_cb, raise_timeout=False)
@Throttle(UPDATE_CAPTURE_INTERVAL)
async def async_update_captures(self):
"""Update Capture sources."""
resp = await self.send_bluesound_command(
'RadioBrowse?service=Capture')
if not resp:
return
self._capture_items = []
def _create_capture_item(item):
self._capture_items.append({
'title': item.get('@text', ''),
'name': item.get('@text', ''),
'type': item.get('@serviceType', 'Capture'),
'image': item.get('@image', ''),
'url': item.get('@URL', '')
})
if 'radiotime' in resp and 'item' in resp['radiotime']:
if isinstance(resp['radiotime']['item'], list):
for item in resp['radiotime']['item']:
_create_capture_item(item)
else:
_create_capture_item(resp['radiotime']['item'])
return self._capture_items
@Throttle(UPDATE_PRESETS_INTERVAL)
async def async_update_presets(self):
"""Update Presets."""
resp = await self.send_bluesound_command('Presets')
if not resp:
return
self._preset_items = []
def _create_preset_item(item):
self._preset_items.append({
'title': item.get('@name', ''),
'name': item.get('@name', ''),
'type': 'preset',
'image': item.get('@image', ''),
'is_raw_url': True,
'url2': item.get('@url', ''),
'url': 'Preset?id={}'.format(item.get('@id', ''))
})
if 'presets' in resp and 'preset' in resp['presets']:
if isinstance(resp['presets']['preset'], list):
for item in resp['presets']['preset']:
_create_preset_item(item)
else:
_create_preset_item(resp['presets']['preset'])
return self._preset_items
@Throttle(UPDATE_SERVICES_INTERVAL)
async def async_update_services(self):
"""Update Services."""
resp = await self.send_bluesound_command('Services')
if not resp:
return
self._services_items = []
def _create_service_item(item):
self._services_items.append({
'title': item.get('@displayname', ''),
'name': item.get('@name', ''),
'type': item.get('@type', ''),
'image': item.get('@icon', ''),
'url': item.get('@name', '')
})
if 'services' in resp and 'service' in resp['services']:
if isinstance(resp['services']['service'], list):
for item in resp['services']['service']:
_create_service_item(item)
else:
_create_service_item(resp['services']['service'])
return self._services_items
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the device."""
if self._status is None:
return STATE_OFF
if self.is_grouped and not self.is_master:
return STATE_GROUPED
status = self._status.get('state', None)
if status in ('pause', 'stop'):
return STATE_PAUSED
if status in ('stream', 'play'):
return STATE_PLAYING
return STATE_IDLE
@property
def media_title(self):
"""Title of current playing media."""
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
return self._status.get('title1', None)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
if self._status is None:
return None
if self.is_grouped and not self.is_master:
return self._group_name
artist = self._status.get('artist', None)
if not artist:
artist = self._status.get('title2', None)
return artist
@property
def media_album_name(self):
"""Artist of current playing media (Music track only)."""
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
album = self._status.get('album', None)
if not album:
album = self._status.get('title3', None)
return album
@property
def media_image_url(self):
"""Image url of current playing media."""
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
url = self._status.get('image', None)
if not url:
return
if url[0] == '/':
url = "http://{}:{}{}".format(self.host, self.port, url)
return url
@property
def media_position(self):
"""Position of current playing media in seconds."""
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
mediastate = self.state
if self._last_status_update is None or mediastate == STATE_IDLE:
return None
position = self._status.get('secs', None)
if position is None:
return None
position = float(position)
if mediastate == STATE_PLAYING:
position += (dt_util.utcnow() -
self._last_status_update).total_seconds()
return position
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
duration = self._status.get('totlen', None)
if duration is None:
return None
return float(duration)
@property
def media_position_updated_at(self):
"""Last time status was updated."""
return self._last_status_update
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
volume = self._status.get('volume', None)
if self.is_grouped:
volume = self._sync_status.get('@volume', None)
if volume is not None:
return int(volume) / 100
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
volume = self.volume_level
if not volume:
return None
return 0 <= volume < 0.001
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def icon(self):
"""Return the icon of the device."""
return self._icon
@property
def source_list(self):
"""List of available input sources."""
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
sources = []
for source in self._preset_items:
sources.append(source['title'])
for source in [x for x in self._services_items
if x['type'] == 'LocalMusic' or
x['type'] == 'RadioService']:
sources.append(source['title'])
for source in self._capture_items:
sources.append(source['title'])
return sources
@property
def source(self):
"""Name of the current input source."""
from urllib import parse
if (self._status is None or
(self.is_grouped and not self.is_master)):
return None
current_service = self._status.get('service', '')
if current_service == '':
return ''
stream_url = self._status.get('streamUrl', '')
if self._status.get('is_preset', '') == '1' and stream_url != '':
# This check doesn't work with all presets, for example playlists.
# But it works with radio; service_items will catch playlists.
items = [x for x in self._preset_items if 'url2' in x and
parse.unquote(x['url2']) == stream_url]
if items:
return items[0]['title']
# This can be difficult to detect. Bluetooth may be named different
# things and there is no reliable way to match entries in the capture
# list to what is currently playing, so it's a bit of guesswork.
# This method will need some tweaking over time.
title = self._status.get('title1', '').lower()
if title == 'bluetooth' or stream_url == 'Capture:hw:2,0/44100/16/2':
items = [x for x in self._capture_items
if x['url'] == "Capture%3Abluez%3Abluetooth"]
if items:
return items[0]['title']
items = [x for x in self._capture_items if x['url'] == stream_url]
if items:
return items[0]['title']
if stream_url[:8] == 'Capture:':
stream_url = stream_url[8:]
idx = BluesoundPlayer._try_get_index(stream_url, ':')
if idx > 0:
stream_url = stream_url[:idx]
for item in self._capture_items:
url = parse.unquote(item['url'])
if url[:8] == 'Capture:':
url = url[8:]
idx = BluesoundPlayer._try_get_index(url, ':')
if idx > 0:
url = url[:idx]
if url.lower() == stream_url.lower():
return item['title']
items = [x for x in self._capture_items
if x['name'] == current_service]
if items:
return items[0]['title']
items = [x for x in self._services_items
if x['name'] == current_service]
if items:
return items[0]['title']
if self._status.get('streamUrl', '') != '':
_LOGGER.debug("Couldn't find source of stream URL: %s",
self._status.get('streamUrl', ''))
return None
@property
def supported_features(self):
"""Flag of media commands that are supported."""
if self._status is None:
return None
if self.is_grouped and not self.is_master:
return SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_SET | \
SUPPORT_VOLUME_MUTE
supported = SUPPORT_CLEAR_PLAYLIST
if self._status.get('indexing', '0') == '0':
supported = supported | SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA | \
SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_SELECT_SOURCE | \
SUPPORT_SHUFFLE_SET
current_vol = self.volume_level
if current_vol is not None and current_vol >= 0:
supported = supported | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE
if self._status.get('canSeek', '') == '1':
supported = supported | SUPPORT_SEEK
return supported
@property
def is_master(self):
"""Return true if player is a coordinator."""
return self._is_master
@property
def is_grouped(self):
"""Return true if player is a coordinator."""
return self._master is not None or self._is_master
@property
def shuffle(self):
"""Return true if shuffle is active."""
return self._status.get('shuffle', '0') == '1'
async def async_join(self, master):
"""Join the player to a group."""
master_device = [device for device in self.hass.data[DATA_BLUESOUND]
if device.entity_id == master]
if master_device:
_LOGGER.debug("Trying to join player: %s to master: %s",
self.host, master_device[0].host)
await master_device[0].async_add_slave(self)
else:
_LOGGER.error("Master not found %s", master_device)
async def async_unjoin(self):
"""Unjoin the player from a group."""
if self._master is None:
return
_LOGGER.debug("Trying to unjoin player: %s", self.host)
await self._master.async_remove_slave(self)
async def async_add_slave(self, slave_device):
"""Add slave to master."""
return await self.send_bluesound_command(
'/AddSlave?slave={}&port={}'.format(
slave_device.host, slave_device.port))
async def async_remove_slave(self, slave_device):
"""Remove slave to master."""
return await self.send_bluesound_command(
'/RemoveSlave?slave={}&port={}'.format(
slave_device.host, slave_device.port))
async def async_increase_timer(self):
"""Increase sleep time on player."""
sleep_time = await self.send_bluesound_command('/Sleep')
if sleep_time is None:
_LOGGER.error(
"Error while increasing sleep time on player: %s", self.host)
return 0
return int(sleep_time.get('sleep', '0'))
async def async_clear_timer(self):
"""Clear sleep timer on player."""
sleep = 1
while sleep > 0:
sleep = await self.async_increase_timer()
async def async_set_shuffle(self, shuffle):
"""Enable or disable shuffle mode."""
value = '1' if shuffle else '0'
return await self.send_bluesound_command(
'/Shuffle?state={}'.format(value))
async def async_select_source(self, source):
"""Select input source."""
if self.is_grouped and not self.is_master:
return
items = [x for x in self._preset_items if x['title'] == source]
if not items:
items = [x for x in self._services_items if x['title'] == source]
if not items:
items = [x for x in self._capture_items if x['title'] == source]
if not items:
return
selected_source = items[0]
url = 'Play?url={}&preset_id&image={}'.format(
selected_source['url'], selected_source['image'])
if 'is_raw_url' in selected_source and selected_source['is_raw_url']:
url = selected_source['url']
return await self.send_bluesound_command(url)
async def async_clear_playlist(self):
"""Clear players playlist."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command('Clear')
async def async_media_next_track(self):
"""Send media_next command to media player."""
if self.is_grouped and not self.is_master:
return
cmd = 'Skip'
if self._status and 'actions' in self._status:
for action in self._status['actions']['action']:
if ('@name' in action and '@url' in action and
action['@name'] == 'skip'):
cmd = action['@url']
return await self.send_bluesound_command(cmd)
async def async_media_previous_track(self):
"""Send media_previous command to media player."""
if self.is_grouped and not self.is_master:
return
cmd = 'Back'
if self._status and 'actions' in self._status:
for action in self._status['actions']['action']:
if ('@name' in action and '@url' in action and
action['@name'] == 'back'):
cmd = action['@url']
return await self.send_bluesound_command(cmd)
async def async_media_play(self):
"""Send media_play command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command('Play')
async def async_media_pause(self):
"""Send media_pause command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command('Pause')
async def async_media_stop(self):
"""Send stop command."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command('Pause')
async def async_media_seek(self, position):
"""Send media_seek command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command(
'Play?seek={}'.format(float(position)))
async def async_play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if self.is_grouped and not self.is_master:
return
url = 'Play?url={}'.format(media_id)
if kwargs.get(ATTR_MEDIA_ENQUEUE):
return await self.send_bluesound_command(url)
return await self.send_bluesound_command(url)
async def async_volume_up(self):
"""Volume up the media player."""
current_vol = self.volume_level
if not current_vol or current_vol < 0:
return
        return await self.async_set_volume_level(((current_vol*100)+1)/100)
async def async_volume_down(self):
"""Volume down the media player."""
current_vol = self.volume_level
if not current_vol or current_vol < 0:
return
        return await self.async_set_volume_level(((current_vol*100)-1)/100)
async def async_set_volume_level(self, volume):
"""Send volume_up command to media player."""
if volume < 0:
volume = 0
elif volume > 1:
volume = 1
return await self.send_bluesound_command(
'Volume?level=' + str(float(volume) * 100))
async def async_mute_volume(self, mute):
"""Send mute command to media player."""
if mute:
volume = self.volume_level
if volume > 0:
self._lastvol = volume
return await self.send_bluesound_command('Volume?level=0')
return await self.send_bluesound_command(
'Volume?level=' + str(float(self._lastvol) * 100))
| 34.127179
| 78
| 0.58998
|
3e5d038aeed910d682689d04d010bef60fc3c383
| 11,188
|
py
|
Python
|
tests/api/v1/test_users.py
|
AIica/Crypto-2020
|
8980fdd3c20651eb6fd4a66fa72c1f9099dc23d7
|
[
"Apache-2.0"
] | 2
|
2019-06-19T07:11:28.000Z
|
2019-06-21T05:30:07.000Z
|
tests/api/v1/test_users.py
|
AIica/Crypto-2020
|
8980fdd3c20651eb6fd4a66fa72c1f9099dc23d7
|
[
"Apache-2.0"
] | null | null | null |
tests/api/v1/test_users.py
|
AIica/Crypto-2020
|
8980fdd3c20651eb6fd4a66fa72c1f9099dc23d7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.utils import set_config
from tests.helpers import *
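# Note (added comment): the tests below exercise the account_visibility setting;
# an anonymous client gets 200 when accounts are public, a 302 redirect when they
# are private, and 404 when they are visible to admins only.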
def test_api_users_get_public():
"""Can a user get /api/v1/users if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config('account_visibility', 'public')
r = client.get('/api/v1/users')
assert r.status_code == 200
set_config('account_visibility', 'private')
r = client.get('/api/v1/users')
assert r.status_code == 302
set_config('account_visibility', 'admins')
r = client.get('/api/v1/users')
assert r.status_code == 404
destroy_ctfd(app)
def test_api_users_get_private():
"""Can a user get /api/v1/users if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config('account_visibility', 'public')
r = client.get('/api/v1/users')
assert r.status_code == 200
set_config('account_visibility', 'private')
r = client.get('/api/v1/users')
assert r.status_code == 302
set_config('account_visibility', 'admins')
r = client.get('/api/v1/users')
assert r.status_code == 404
destroy_ctfd(app)
def test_api_users_get_admins():
"""Can a user get /api/v1/users if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config('account_visibility', 'public')
r = client.get('/api/v1/users')
assert r.status_code == 200
set_config('account_visibility', 'private')
r = client.get('/api/v1/users')
assert r.status_code == 302
set_config('account_visibility', 'admins')
r = client.get('/api/v1/users')
assert r.status_code == 404
destroy_ctfd(app)
def test_api_users_post_non_admin():
"""Can a user post /api/v1/users if not admin"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.post('/api/v1/users', json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_users_post_admin():
"""Can a user post /api/v1/users if admin"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, 'admin') as client:
r = client.post('/api/v1/users', json={
"name": "user",
"email": "user@user.com",
"password": "pass"
})
assert r.status_code == 200
destroy_ctfd(app)
def test_api_team_get_public():
"""Can a user get /api/v1/team/<user_id> if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config('account_visibility', 'public')
gen_user(app.db)
r = client.get('/api/v1/users/2')
assert r.status_code == 200
set_config('account_visibility', 'private')
r = client.get('/api/v1/users/2')
assert r.status_code == 302
set_config('account_visibility', 'admins')
r = client.get('/api/v1/users/2')
assert r.status_code == 404
destroy_ctfd(app)
def test_api_team_get_private():
"""Can a user get /api/v1/users/<user_id> if users are private"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
set_config('account_visibility', 'public')
r = client.get('/api/v1/users/2')
print(r.__dict__)
assert r.status_code == 200
set_config('account_visibility', 'private')
r = client.get('/api/v1/users/2')
assert r.status_code == 200
set_config('account_visibility', 'admins')
r = client.get('/api/v1/users/2')
assert r.status_code == 404
destroy_ctfd(app)
def test_api_team_get_admin():
"""Can a user get /api/v1/users/<user_id> if users are viewed by admins only"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, 'admin') as client:
gen_user(app.db)
set_config('account_visibility', 'public')
r = client.get('/api/v1/users/2')
assert r.status_code == 200
set_config('account_visibility', 'private')
r = client.get('/api/v1/users/2')
assert r.status_code == 200
set_config('account_visibility', 'admins')
r = client.get('/api/v1/users/2')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_patch_non_admin():
"""Can a user patch /api/v1/users/<user_id> if not admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with app.test_client() as client:
r = client.patch('/api/v1/users/2', json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_patch_admin():
"""Can a user patch /api/v1/users/<user_id> if admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app, 'admin') as client:
r = client.patch('/api/v1/users/2', json={
"name": "user",
"email": "user@ctfd.io",
"password": "password",
"country": "US"
})
assert r.status_code == 200
assert r.get_json()['data'][0]['country'] == 'US'
destroy_ctfd(app)
def test_api_user_delete_non_admin():
"""Can a user delete /api/v1/users/<user_id> if not admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with app.test_client() as client:
            r = client.delete('/api/v1/users/2', json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_delete_admin():
"""Can a user patch /api/v1/users/<user_id> if admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app, 'admin') as client:
r = client.delete('/api/v1/users/2', json="")
assert r.status_code == 200
assert r.get_json().get('data') is None
destroy_ctfd(app)
def test_api_user_get_me_not_logged_in():
"""Can a user get /api/v1/users/me if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/api/v1/users/me')
assert r.status_code == 302
destroy_ctfd(app)
def test_api_user_get_me_logged_in():
"""Can a user get /api/v1/users/me if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/me')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_patch_me_not_logged_in():
"""Can a user patch /api/v1/users/me if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.patch('/api/v1/users/me', json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_patch_me_logged_in():
"""Can a user patch /api/v1/users/me if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.patch('/api/v1/users/me', json={"name": "user",
"email": "user@ctfd.io",
"password": "password",
"confirm": "password",
"country": "US"})
assert r.status_code == 200
assert r.get_json()['data']['country'] == 'US'
destroy_ctfd(app)
def test_api_user_get_me_solves_not_logged_in():
"""Can a user get /api/v1/users/me/solves if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/api/v1/users/me/solves')
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_get_me_solves_logged_in():
"""Can a user get /api/v1/users/me/solves if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/me/solves')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_solves():
"""Can a user get /api/v1/users/<user_id>/solves if logged in"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/2/solves')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_me_fails_not_logged_in():
"""Can a user get /api/v1/users/me/fails if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/api/v1/users/me/fails')
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_get_me_fails_logged_in():
"""Can a user get /api/v1/users/me/fails if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/me/fails')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_fails():
"""Can a user get /api/v1/users/<user_id>/fails if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/2/fails')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_me_awards_not_logged_in():
"""Can a user get /api/v1/users/me/awards if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/api/v1/users/me/awards')
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_get_me_awards_logged_in():
"""Can a user get /api/v1/users/me/awards if logged in"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/me/awards')
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_awards():
"""Can a user get /api/v1/users/<user_id>/awards if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get('/api/v1/users/2/awards')
assert r.status_code == 200
destroy_ctfd(app)
| 33.90303
| 83
| 0.58384
|
706442f2acd8f0f0293f8aebbb359c5fcbc010a2
| 4,422
|
py
|
Python
|
scripts/reddit-retriever.py
|
nicolamelluso/graphbrain
|
c0740ab89df60d709ee2914beb5379619e4e1470
|
[
"MIT"
] | 1
|
2021-04-24T04:52:31.000Z
|
2021-04-24T04:52:31.000Z
|
scripts/reddit-retriever.py
|
nicolamelluso/graphbrain
|
c0740ab89df60d709ee2914beb5379619e4e1470
|
[
"MIT"
] | null | null | null |
scripts/reddit-retriever.py
|
nicolamelluso/graphbrain
|
c0740ab89df60d709ee2914beb5379619e4e1470
|
[
"MIT"
] | 1
|
2020-12-23T11:20:43.000Z
|
2020-12-23T11:20:43.000Z
|
# TODO: this is no longer working due to recent Reddit API changes
import time
import datetime
import json
import argparse
import praw
class RedditRetriever(object):
def __init__(self, _subreddit, _outfile, _start_date, _end_date, step=3600):
self.r = praw.Reddit(site_name='graphbrain', user_agent='GraphBrain (http://graphbrain.org)')
self.subreddit = _subreddit
self.output_file = _outfile
self.step = step
self.start_ts = int(time.mktime(datetime.datetime.strptime(_start_date, "%d/%m/%Y").timetuple()))
self.end_ts = int(time.mktime(datetime.datetime.strptime(_end_date, "%d/%m/%Y").timetuple()))
self.cur_ts = self.start_ts
self.posts = 0
self.comments = 0
self.retry_wait = 30
def print_status(self):
delta_t = self.end_ts - self.start_ts
done_t = self.cur_ts - self.start_ts
per = (float(done_t) / float(delta_t)) * 100.
print('retrieving subreddit: %s [%.2f%% done] --- %s posts; %s comments'
% (self.subreddit, per, self.posts, self.comments))
def build_comment(self, comment):
if hasattr(comment, 'replies'):
replies = [self.build_comment(reply) for reply in comment.replies if reply is not None]
else:
replies = []
if not hasattr(comment, 'body'):
return None
if hasattr(comment, 'author') and comment.author is not None:
author = comment.author.name
else:
author = ''
self.comments += 1
return {'id': comment.id,
'author': author,
'body': comment.body,
'score': comment.score,
'ups': comment.ups,
'downs': comment.downs,
'created': comment.created,
'created_utc': comment.created_utc,
'comments': replies}
def comments_tree(self, post):
top_level_comments = list(post.comments)
return [self.build_comment(comment) for comment in top_level_comments]
def retrieve_posts(self):
for ts in range(self.cur_ts, self.end_ts, self.step):
self.cur_ts = ts
query = 'timestamp:%s..%s' % (str(ts), str(ts + self.step))
self.print_status()
search_results = self.r.subreddit(self.subreddit).search(query, syntax='cloudsearch')
for res in search_results:
comments = self.comments_tree(res)
post = {'id': res.id,
'title': res.title,
'author': res.author.name,
'permalink': res.permalink.replace('?ref=search_posts', ''),
'url': res.url,
'selftext': res.selftext,
'score': res.score,
'ups': res.ups,
'downs': res.downs,
'created': res.created,
'created_utc': res.created_utc,
'comments': comments}
self.posts += 1
# write to file
with open(self.output_file, 'a') as file:
file.write('%s\n' % json.dumps(post, separators=(',', ':')))
def run(self):
print('writing to file: %s' % self.output_file)
while True:
try:
self.retrieve_posts()
print('done.')
exit()
except KeyboardInterrupt:
exit()
except SystemExit:
exit()
except Exception as e:
print('exception: %s' % str(e))
print('retrying in %s seconds...' % self.retry_wait)
time.sleep(self.retry_wait)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--outfile', type=str, help='output file', default=None)
parser.add_argument('--startdate', type=str, help='start date', default=None)
parser.add_argument('--enddate', type=str, help='end date', default=None)
parser.add_argument('--subreddit', type=str, help='subreddit to retrieve.', default=None)
args = parser.parse_args()
subreddit = args.subreddit
outfile = args.outfile
startdate = args.startdate
enddate = args.enddate
rr = RedditRetriever(subreddit, outfile, startdate, enddate)
rr.run()
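# Example invocation (a sketch; the subreddit, dates and output path are
# illustrative assumptions, using the "%d/%m/%Y" date format expected above):
#   python reddit-retriever.py --subreddit=science --startdate=01/01/2017 \
#       --enddate=31/01/2017 --outfile=science-2017-01.json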
| 37.794872
| 105
| 0.553143
|
24a52e2de6eea876eddbf9c958866df7b41c00cd
| 2,618
|
py
|
Python
|
model-optimizer/mo/front/tf/common.py
|
jayabs2020/openvino
|
67a82a040faaf66f109035acf7de6e4b7568bc08
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/front/tf/common.py
|
jayabs2020/openvino
|
67a82a040faaf66f109035acf7de6e4b7568bc08
|
[
"Apache-2.0"
] | 19
|
2021-03-26T08:11:00.000Z
|
2022-02-21T13:06:26.000Z
|
model-optimizer/mo/front/tf/common.py
|
jayabs2020/openvino
|
67a82a040faaf66f109035acf7de6e4b7568bc08
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from tensorflow.core.framework import types_pb2 as tf_types # pylint: disable=no-name-in-module,import-error
# Suppress false positive pylint warning about function with too many arguments
# pylint: disable=E1121
# mapping between TF data type and numpy data type and function to extract data from TF tensor
_tf_np_mapping = [('DT_BOOL', np.bool, lambda pb: pb.bool_val, lambda x: bool_cast(x)),
('DT_INT8', np.int8, lambda pb: pb.int_val, lambda x: np.int8(x)),
('DT_INT16', np.int16, lambda pb: pb.int_val, lambda x: np.int16(x)),
('DT_INT32', np.int32, lambda pb: pb.int_val, lambda x: np.int32(x)),
('DT_INT64', np.int64, lambda pb: pb.int64_val, lambda x: np.int64(x)),
('DT_UINT8', np.uint8, lambda pb: pb.uint8_val, lambda x: np.uint8(x)),
('DT_UINT16', np.uint16, lambda pb: pb.int_val, lambda x: np.uint16(x)),
('DT_UINT32', np.uint32, lambda pb: pb.uint32_val, lambda x: np.uint32(x)),
('DT_UINT64', np.uint64, lambda pb: pb.uint64_val, lambda x: np.uint64(x)),
('DT_HALF', np.float16, lambda pb: np.uint16(pb.half_val).view(np.float16), lambda x: np.float16(x)),
('DT_FLOAT', np.float32, lambda pb: pb.float_val, lambda x: np.float32(x)),
('DT_DOUBLE', np.double, lambda pb: pb.double_val, lambda x: np.double(x)),
('DT_STRING', np.str, lambda pb: pb.string_val, lambda x: np.str(x)),
]
tf_data_type_decode = {getattr(tf_types, tf_dt): (np_type, func) for tf_dt, np_type, func, _ in _tf_np_mapping if
hasattr(tf_types, tf_dt)}
tf_data_type_cast = {np_type: cast for tf_dt, np_type, _, cast in _tf_np_mapping if hasattr(tf_types, tf_dt)}
def bool_cast(x):
if isinstance(x, str):
return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast'
else:
return np.bool(x)
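def _example_decode_and_cast():
    """Illustrative sketch (not part of the original module): looks up the numpy
    dtype registered for tf_types.DT_FLOAT and casts a raw value with the
    matching helper; the function name is an assumption for demonstration only."""
    np_type, _extract_fn = tf_data_type_decode[tf_types.DT_FLOAT]  # (np.float32, float_val getter)
    return tf_data_type_cast[np_type]('1.5')  # np.float32(1.5)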
| 53.428571
| 119
| 0.652788
|
752e9db5ba5d909aad96d679a468b228942bd9b3
| 17,087
|
py
|
Python
|
Lib/distutils/tests/test_sdist.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 33
|
2021-07-25T14:23:35.000Z
|
2022-03-31T00:17:30.000Z
|
Lib/distutils/tests/test_sdist.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 50
|
2020-01-07T19:11:11.000Z
|
2022-03-01T14:40:03.000Z
|
Lib/distutils/tests/test_sdist.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 8
|
2020-10-06T14:38:08.000Z
|
2022-01-12T14:29:46.000Z
|
"""Tests for distutils.command.sdist."""
import os
import tarfile
import unittest
import warnings
import zipfile
from os.path import join
from textwrap import dedent
from test.support import captured_stdout, run_unittest
from test.support.warnings_helper import check_warnings
try:
import zlib
ZLIB_SUPPORT = True
except ImportError:
ZLIB_SUPPORT = False
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
from distutils.tests.test_config import BasePyPIRCCommandTestCase
from distutils.errors import DistutilsOptionError
from distutils.spawn import find_executable
from distutils.log import WARN
from distutils.filelist import FileList
from distutils.archive_util import ARCHIVE_FORMATS
SETUP_PY = """
from distutils.core import setup
import somecode
setup(name='fake')
"""
MANIFEST = """\
# file GENERATED by distutils, do NOT edit
README
buildout.cfg
inroot.txt
setup.py
data%(sep)sdata.dt
scripts%(sep)sscript.py
some%(sep)sfile.txt
some%(sep)sother_file.txt
somecode%(sep)s__init__.py
somecode%(sep)sdoc.dat
somecode%(sep)sdoc.txt
"""
class SDistTestCase(BasePyPIRCCommandTestCase):
def setUp(self):
# PyPIRCCommandTestCase creates a temp dir already
# and put it in self.tmp_dir
super(SDistTestCase, self).setUp()
# setting up an environment
self.old_path = os.getcwd()
os.mkdir(join(self.tmp_dir, 'somecode'))
os.mkdir(join(self.tmp_dir, 'dist'))
# a package, and a README
self.write_file((self.tmp_dir, 'README'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#')
self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY)
os.chdir(self.tmp_dir)
def tearDown(self):
# back to normal
os.chdir(self.old_path)
super(SDistTestCase, self).tearDown()
def get_cmd(self, metadata=None):
"""Returns a cmd"""
if metadata is None:
metadata = {'name': 'fake', 'version': '1.0',
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'}
dist = Distribution(metadata)
dist.script_name = 'setup.py'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.dist_dir = 'dist'
return dist, cmd
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_prune_file_list(self):
# this test creates a project with some VCS dirs and an NFS rename
# file, then launches sdist to check they get pruned on all systems
# creating VCS directories with some files in them
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
self.write_file((self.tmp_dir, 'somecode', '.hg',
'ok'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
self.write_file((self.tmp_dir, 'somecode', '.git',
'ok'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
# now building a sdist
dist, cmd = self.get_cmd()
# zip is available universally
# (tar might not be installed under win32)
cmd.formats = ['zip']
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything has been pruned correctly
expected = ['', 'PKG-INFO', 'README', 'setup.py',
'somecode/', 'somecode/__init__.py']
self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
@unittest.skipIf(find_executable('tar') is None,
"The tar command is not found")
@unittest.skipIf(find_executable('gzip') is None,
"The gzip command is not found")
def test_make_distribution(self):
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar then a tar
cmd.formats = ['gztar', 'tar']
cmd.ensure_finalized()
cmd.run()
# making sure we have two files
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
os.remove(join(dist_folder, 'fake-1.0.tar'))
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
# now trying a tar then a gztar
cmd.formats = ['tar', 'gztar']
cmd.ensure_finalized()
cmd.run()
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_add_defaults(self):
# http://bugs.python.org/issue2279
# add_default should also include
# data_files and package_data
dist, cmd = self.get_cmd()
# filling data_files by pointing files
# in package_data
dist.package_data = {'': ['*.cfg', '*.dat'],
'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
# adding some data in data_files
data_dir = join(self.tmp_dir, 'data')
os.mkdir(data_dir)
self.write_file((data_dir, 'data.dt'), '#')
some_dir = join(self.tmp_dir, 'some')
os.mkdir(some_dir)
# make sure VCS directories are pruned (#14004)
hg_dir = join(self.tmp_dir, '.hg')
os.mkdir(hg_dir)
self.write_file((hg_dir, 'last-message.txt'), '#')
# a buggy regex used to prevent this from working on windows (#6884)
self.write_file((self.tmp_dir, 'buildout.cfg'), '#')
self.write_file((self.tmp_dir, 'inroot.txt'), '#')
self.write_file((some_dir, 'file.txt'), '#')
self.write_file((some_dir, 'other_file.txt'), '#')
dist.data_files = [('data', ['data/data.dt',
'buildout.cfg',
'inroot.txt',
'notexisting']),
'some/file.txt',
'some/other_file.txt']
# adding a script
script_dir = join(self.tmp_dir, 'scripts')
os.mkdir(script_dir)
self.write_file((script_dir, 'script.py'), '#')
dist.scripts = [join('scripts', 'script.py')]
cmd.formats = ['zip']
cmd.use_defaults = True
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything was added
expected = ['', 'PKG-INFO', 'README', 'buildout.cfg',
'data/', 'data/data.dt', 'inroot.txt',
'scripts/', 'scripts/script.py', 'setup.py',
'some/', 'some/file.txt', 'some/other_file.txt',
'somecode/', 'somecode/__init__.py', 'somecode/doc.dat',
'somecode/doc.txt']
self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected])
# checking the MANIFEST
f = open(join(self.tmp_dir, 'MANIFEST'))
try:
manifest = f.read()
finally:
f.close()
self.assertEqual(manifest, MANIFEST % {'sep': os.sep})
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_metadata_check_option(self):
        # testing the `metadata-check` option
dist, cmd = self.get_cmd(metadata={})
# this should raise some warnings !
# with the `check` subcommand
cmd.ensure_finalized()
cmd.run()
warnings = [msg for msg in self.get_logs(WARN) if
msg.startswith('warning: check:')]
self.assertEqual(len(warnings), 2)
# trying with a complete set of metadata
self.clear_logs()
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.metadata_check = 0
cmd.run()
warnings = [msg for msg in self.get_logs(WARN) if
msg.startswith('warning: check:')]
self.assertEqual(len(warnings), 0)
def test_check_metadata_deprecated(self):
# makes sure make_metadata is deprecated
dist, cmd = self.get_cmd()
with check_warnings() as w:
warnings.simplefilter("always")
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_show_formats(self):
with captured_stdout() as stdout:
show_formats()
# the output should be a header line + one line per format
num_formats = len(ARCHIVE_FORMATS.keys())
output = [line for line in stdout.getvalue().split('\n')
if line.strip().startswith('--formats=')]
self.assertEqual(len(output), num_formats)
def test_finalize_options(self):
dist, cmd = self.get_cmd()
cmd.finalize_options()
# default options set by finalize
self.assertEqual(cmd.manifest, 'MANIFEST')
self.assertEqual(cmd.template, 'MANIFEST.in')
self.assertEqual(cmd.dist_dir, 'dist')
        # formats has to be a string splittable on (' ', ',') or
# a stringlist
cmd.formats = 1
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.formats = ['zip']
cmd.finalize_options()
# formats has to be known
cmd.formats = 'supazipa'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
# the following tests make sure there is a nice error message instead
# of a traceback when parsing an invalid manifest template
def _check_template(self, content):
dist, cmd = self.get_cmd()
os.chdir(self.tmp_dir)
self.write_file('MANIFEST.in', content)
cmd.ensure_finalized()
cmd.filelist = FileList()
cmd.read_template()
warnings = self.get_logs(WARN)
self.assertEqual(len(warnings), 1)
def test_invalid_template_unknown_command(self):
self._check_template('taunt knights *')
def test_invalid_template_wrong_arguments(self):
# this manifest command takes one argument
self._check_template('prune')
@unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
def test_invalid_template_wrong_path(self):
# on Windows, trailing slashes are not allowed
# this used to crash instead of raising a warning: #8286
self._check_template('include examples/')
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_get_file_list(self):
# make sure MANIFEST is recalculated
dist, cmd = self.get_cmd()
# filling data_files by pointing files in package_data
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(len(manifest), 5)
# adding a file
self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
# make sure build_py is reinitialized, like a fresh run
build_py = dist.get_command_obj('build_py')
build_py.finalized = False
build_py.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest2 = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
# do we have the new file in MANIFEST ?
self.assertEqual(len(manifest2), 6)
self.assertIn('doc2.txt', manifest2[-1])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_manifest_marker(self):
# check that autogenerated MANIFESTs have a marker
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(manifest[0],
'# file GENERATED by distutils, do NOT edit')
@unittest.skipUnless(ZLIB_SUPPORT, "Need zlib support to run")
def test_manifest_comments(self):
# make sure comments don't cause exceptions or wrong includes
contents = dedent("""\
# bad.py
#bad.py
good.py
""")
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), contents)
self.write_file((self.tmp_dir, 'good.py'), '# pick me!')
self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!")
self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!")
cmd.run()
self.assertEqual(cmd.filelist.files, ['good.py'])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_manual_manifest(self):
# check that a MANIFEST without a marker is left alone
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
self.write_file((self.tmp_dir, 'README.manual'),
'This project maintains its MANIFEST file itself.')
cmd.run()
self.assertEqual(cmd.filelist.files, ['README.manual'])
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(manifest, ['README.manual'])
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
filenames = [tarinfo.name for tarinfo in archive]
finally:
archive.close()
self.assertEqual(sorted(filenames), ['fake-1.0', 'fake-1.0/PKG-INFO',
'fake-1.0/README.manual'])
@unittest.skipUnless(ZLIB_SUPPORT, "requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
@unittest.skipIf(find_executable('tar') is None,
"The tar command is not found")
@unittest.skipIf(find_executable('gzip') is None,
"The gzip command is not found")
def test_make_distribution_owner_group(self):
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar and specifying the owner+group
cmd.formats = ['gztar']
cmd.owner = pwd.getpwuid(0)[0]
cmd.group = grp.getgrgid(0)[0]
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
# building a sdist again
dist, cmd = self.get_cmd()
# creating a gztar
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
# note that we are not testing the group ownership here
# because, depending on the platforms and the container
# rights (see #7408)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, os.getuid())
finally:
archive.close()
def test_suite():
return unittest.makeSuite(SDistTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| 34.589069
| 78
| 0.590098
|
139111a984737ce607d3c5b79b759829893d2de0
| 205
|
py
|
Python
|
examples/movies-data-web/app/viewmodels/movies.py
|
sunyunxian/flask-examples
|
7ad53ce1d34354c22b7f1728218ab6a64058ccb4
|
[
"Apache-2.0"
] | null | null | null |
examples/movies-data-web/app/viewmodels/movies.py
|
sunyunxian/flask-examples
|
7ad53ce1d34354c22b7f1728218ab6a64058ccb4
|
[
"Apache-2.0"
] | null | null | null |
examples/movies-data-web/app/viewmodels/movies.py
|
sunyunxian/flask-examples
|
7ad53ce1d34354c22b7f1728218ab6a64058ccb4
|
[
"Apache-2.0"
] | null | null | null |
class MoviesViewModels:
    """View-model helpers that package movie data for the web layer (placeholder implementation)."""
    def __init__(self) -> None:
        pass
    def package_single(self, data, keyword):
        # Placeholder: build and return the response payload for a single movie.
        returned = {
        }
        return returned
    def package_collection(self, keyword):
        # Placeholder: package a collection of movies that match the keyword.
        pass
| 15.769231
| 44
| 0.585366
|
01ae065a088cab248272528b50b763b9d419b96a
| 20,121
|
py
|
Python
|
cigarco/mapping.py
|
aganezov/B2_CIGARCO
|
d01a77008dfe16d039308fe266036980daa31570
|
[
"MIT"
] | null | null | null |
cigarco/mapping.py
|
aganezov/B2_CIGARCO
|
d01a77008dfe16d039308fe266036980daa31570
|
[
"MIT"
] | null | null | null |
cigarco/mapping.py
|
aganezov/B2_CIGARCO
|
d01a77008dfe16d039308fe266036980daa31570
|
[
"MIT"
] | null | null | null |
import bisect
from dataclasses import dataclass, field
from functools import lru_cache
from itertools import accumulate
from typing import List, Tuple, Set, Optional, Dict
from cigarco.cigar_utils import is_valid_cigar, parse_cigar, TARGET_CONSUMING_OPERATIONS, QUERY_CONSUMING_OPERATIONS
@dataclass(frozen=True, eq=True)
class Alignment(object):
"""
An alignment class an instance of which contains all the information about a given query->target alignment
Args:
query_name (str): a name of the query which alignment we represent
target_name (str): a name of the target for the represented alignment
start (int): a start coordinate for the query alignment w.r.t. to the target
cigar (str): a CIGAR encoded (doc: https://samtools.github.io/hts-specs/SAMv1.pdf , page 7) alignment of the query to the target
direction (bool): a flag to indicate 5'->3' (True) and 3'->5' (False) alignment orientation
Example:
Alignment("tr1", "chr1" 0, "11M")
"""
query_name: str
target_name: str
start: int # no default value of 0 specified as different schools of thought may have 0 or 1 as defaults, and explicit is better than implicit
cigar: str
direction: bool = True
def __post_init__(self):
"""
Ensuring the validity of the created alignment abject, as subsequent methods that accept the alignment object expect a valid alignment
Raises:
ValueError: if the start coordinate is negative
"""
if self.start < 0:
raise ValueError(f"incorrect start coordinate {self.start}. Must be a non-negative integer")
if not is_valid_cigar(self.cigar):
raise ValueError(f"invalid CIGAR string '{self.cigar}'")
class AlignmentDescriptor(object):
"""
A descriptor designed for controlling access to the .alignment attribute in the CMapper class object to ensure that all precomputed data structures are invalidated
"""
def __set_name__(self, owner, name):
self.name = name
def __get__(self, instance, owner=None) -> Optional[Alignment]:
return instance.__dict__.get(self.name)
def __set__(self, instance, value):
"""
On access to the .alignment attribute, if the target class is a CMapper (we don't care about other classes),
we want to ensure that the prefix sums arrays are invalidated, and the cache on the coordinate transformation function is cleared
"""
if isinstance(instance, CMapper):
instance._query_prefix_sums = None
instance._target_prefix_sums = None
instance._matching_backtracking = None
instance.transform_coordinate.cache_clear()
instance.__dict__[self.name] = value
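# Usage note (added comment): reassigning `mapper.alignment = Alignment(...)` on a
# CMapper instance goes through this descriptor, so the prefix-sum and backtracking
# arrays are reset and transform_coordinate's lru_cache is cleared before the new
# alignment is stored.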
@dataclass
class CMapper(object):
"""
The main object that provides coordinate conversion for a given Alignment object.
The main computational idea behind the CMapper machinery is the prefix sum arrays, which are used to efficiently convert the query coordinate to the target coordinate
    Coordinates that map to an insertion in the query are transformed to the coordinate of the last matching character in the alignment (or 0 if none exists)
Several optimization techniques are utilized to speed up the process:
* prefix sum arrays are computed only once on first access via a combination of private attributes and property access control
* alignment object reassignment (controlled via a descriptor protocol) invalidates the computed prefix sums ensuring that coordinate mapping is always done
w.r.t. current alignment;
    * caching is utilized for the mapping function (invalidated on alteration of the alignment attribute object)
    * for efficient identification of the last matching character in the alignment, when translating coordinates that fall within insertions,
      we compute a matching backtracking array that specifies where the last query & target consuming alignment operation is w.r.t. the current one
Overall complexity of the CMapper is dependent on the size n of the CIGAR alignment string (where n refers to the number of operations in a parsed CIGAR string):
* prefix sum construction takes O(n); on first access only
* coordinate mapping takes O(log(n)) steps at worst (and O(1) for repeated lookups)
Args:
alignment (Alignments): an alignment object for which the coordinate conversion (i.e., mapping) is performed. Alignment object is assumed to be valid
"""
alignment: Alignment = AlignmentDescriptor()
_alignment: Alignment = field(init=False, repr=False, compare=False)
_query_prefix_sums: Optional[List[int]] = field(init=False, repr=False, compare=False)
_target_prefix_sums: Optional[List[int]] = field(init=False, repr=False, compare=False)
_matching_backtracking: Optional[List[int]] = field(init=False, repr=False, compare=False)
@property
def query_prefix_sums(self) -> List[int]:
""" Implements a descriptor-like protocol for accessing prefix sums arrays for query consuming operations in CIGAR string
computation is only invoked if the underlying data-holding attribute is set to None, which can happen either before the first access to the prefix sums array,
or if the alignment object has been changed and prefix sums arrays have been invalidated
Returns:
prefix sums (List[int]) for all operations in alignment CIGAR string
"""
if self._query_prefix_sums is None:
self.compute_query_prefix_sums()
return self._query_prefix_sums
def compute_query_prefix_sums(self):
"""
Computes prefix sums array for query consuming operations in the alignment object's CIGAR string
TODO: rewrite with iterators approach
"""
cigar_operations: List[Tuple[int, str]] = parse_cigar(self.alignment.cigar, direction=self.alignment.direction)
query_op_cnts = [cnt if op in QUERY_CONSUMING_OPERATIONS else 0 for cnt, op in cigar_operations]
self._query_prefix_sums = self.compute_prefix_sums(query_op_cnts)
@property
def target_prefix_sums(self) -> List[int]:
""" Implements a descriptor-like protocol for accessing prefix sums arrays for target consuming operations in CIGAR string
computation is only invoked if the underlying data-holding attribute is set to None, which can happen either before the first access to the prefix sums array,
or if the alignment object has been changed and prefix sums arrays have been invalidated
Returns:
prefix sums (List[int]) for all operations in alignment CIGAR string
"""
if self._target_prefix_sums is None:
self.compute_target_prefix_sums()
return self._target_prefix_sums
@property
def matching_backtracking(self) -> List[int]:
if self._matching_backtracking is None:
self._matching_backtracking = self.compute_matching_backtracking()
return self._matching_backtracking
def compute_matching_backtracking(self) -> List[int]:
"""
In linear time computes the matching backtracking array that helps to speed up the lookup for the last query & target consuming operation,
thus ensuring the stability of the running time of the transformation algorithm
Returns:
            Matching backtracking (List[int]): a list of indexes, one per CIGAR operation, giving the index of the last query & target consuming alignment operation w.r.t. the given one
(can be itself)
"""
last_match_index: int = -1
result: List[int] = []
qt_consuming_operations: Set[str] = QUERY_CONSUMING_OPERATIONS & TARGET_CONSUMING_OPERATIONS
for index, (cnt, op) in enumerate(parse_cigar(self.alignment.cigar)):
if cnt > 0 and op in qt_consuming_operations:
last_match_index = index
result.append(last_match_index)
return result
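    # Worked example (added comment): for the CIGAR "8M7D6M2I2M11D7M" used in the
    # transform_coordinate docstring below, the parsed operations are
    # [(8, 'M'), (7, 'D'), (6, 'M'), (2, 'I'), (2, 'M'), (11, 'D'), (7, 'M')] and
    # the backtracking array is [0, 0, 2, 2, 4, 4, 6]: deletion and insertion
    # blocks point back to the index of the most recent match block.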
def compute_target_prefix_sums(self):
"""
Computes prefix sums array for target consuming operations in the alignment object's CIGAR string
"""
cigar_operations: List[Tuple[int, str]] = parse_cigar(self.alignment.cigar, direction=self.alignment.direction)
target_op_cnts = [cnt if op in TARGET_CONSUMING_OPERATIONS else 0 for cnt, op in cigar_operations]
self._target_prefix_sums = self.compute_prefix_sums(target_op_cnts)
@staticmethod
def compute_prefix_sums(values: List[int]) -> List[int]:
"""
Stateless (thus staticmethod) utility function that computes prefix sums array for a given integer array.
For a given array A a prefix sum array A' is defined as follows:
for i >= 0: A'[i] = sum(A[0] ... A[i])
Provided implementation works in linear O(n) time, where `n` is the length of the input array
Args:
values (List[int]): a list of integers
Returns:
prefix sums (List[int]): a list a prefix sums for the input list
Examples:
>>> CMapper.compute_prefix_sums([1,2,3])
            [1, 3, 6]
>>> CMapper.compute_prefix_sums([])
[]
"""
return list(accumulate(values))
@lru_cache(maxsize=None)
def transform_coordinate(self, source_coordinate: int, direction: str = 'QT') -> int:
"""
The main method to be invoked for coordinate transformation from query -> target coordinate systems.
Uses precomputed (or lazily evaluated on the first invocation) prefix sums arrays for query/target consuming operations and binary search for efficient lookup.
Also utilizes memoization to reduce computational load in case of identical transformation requests (cache is invalidated if the alignment object is altered)
Coordinates that map to insertion in query (w.r.t. target) are transformed to the coordinate of the insertion start
(i.e., the last target coordinate before the insertion seq)
Args:
source_coordinate (int): source coordinate in query coordinate system to be translated to the target coordinate system
                when the source is the aligned query, not the target, and the alignment orientation is reversed, the coordinate is assumed to be given w.r.t. the original read, not the alignment
                when the source is the alignment target, not the query, the coordinate is considered w.r.t. the target coordinate system, regardless of whether the alignment is reversed or not
direction (str): a direction for the coordinate transformation with 'QT' encoding a query->target and 'TQ' encoding a target->query
                when 'TQ' is specified, all comment/doc notation for query and target shall be reversed
Returns:
target coordinate (int): a position that the argument source coordinate maps to in the target sequence
Raises:
ValueError: if the input source coordinate is negative or greater than the length of the query sequence
Examples:
>>> CMapper(Alignment("tr1", "chr1", 3, "8M7D6M2I2M11D7M")).transform_coordinate(4)
7
>>> CMapper(Alignment("tr1", "chr1", 3, "8M7D6M2I2M11D7M")).transform_coordinate(13)
23
>>> CMapper(Alignment("tr2", "chr2", 10, "20M")).transform_coordinate(0)
10
>>> CMapper(Alignment("tr2", "chr2", 10, "20M")).transform_coordinate(10)
20
"""
query_prefix_sums = self.query_prefix_sums
target_prefix_sums = self.target_prefix_sums
if direction != 'QT':
query_prefix_sums, target_prefix_sums = target_prefix_sums, query_prefix_sums
if self.alignment.direction:
source_coordinate -= self.alignment.start
else:
source_coordinate = self.alignment.start + query_prefix_sums[-1] - 1 - source_coordinate
if source_coordinate < 0 or source_coordinate > max(0, query_prefix_sums[-1] - 1):
# last value in prefix sums array is the length of the query, but we need to account for the 0-based index
raise ValueError(f"Can't transform coordinate {source_coordinate}, outside of query coordinate system")
        # identifies the last operation that would have consumed x <= source_coordinate bases in the query
operation_index: int = bisect.bisect_right(query_prefix_sums, source_coordinate)
# special edge case where the alignment cigar string has no query consuming operations, in which case we default to the beginning of the alignment
# while highly improbable -- still allowed by the CIGAR specification in the SAM format
if source_coordinate == 0 and query_prefix_sums[-1] == 0:
return self.alignment.start + int(not self.alignment.direction) * target_prefix_sums[-1]
        # sanity check, does not waste resources at all, but if really needed, can be avoided with the -O flag in execution
assert 0 <= operation_index <= len(query_prefix_sums) - 1
        # we get the operation index for the last stretch where there was a match between query and target,
# this is needed for ensuring that coordinates in non-target-consuming operations (i.e., insertions) map to the left-closest matching position
last_matching_index: int = self.matching_backtracking[operation_index]
# computing how much query has been consumed by the latest operation not covering the queried coordinate,
        # this is required to figure out how much of a non-consumed query we are left with
query_consumed: int = 0 if operation_index == 0 else query_prefix_sums[operation_index - 1]
# computing the amount of target-matching query we are left with
        # if the last_matching index and operation indexes don't match, we are in a non-target-consuming operation and need to set the remaining query length to -1,
# to ensure left-padded insertion coordinate mapping
query_remaining = -1 if operation_index != last_matching_index else (source_coordinate - query_consumed)
# if we are in a matching operation, we need to decrement the matching operation index by 1 to ensure that we correctly calculate consumed target sequence
# (up until the identified operation)
last_matching_index -= int(last_matching_index == operation_index)
# target is only consumed to the last matching operation (not counting the one in which the query coordinate lies)
target_consumed: int = 0 if last_matching_index < 0 else target_prefix_sums[last_matching_index]
        # we need to ensure that we don't end up with a negative offset, which can come from weird valid CIGAR strings and the setup above (e.g., "5I")
if direction == 'QT':
if self.alignment.direction:
result: int = self.alignment.start + max(target_consumed + query_remaining, 0)
else:
result: int = self.alignment.start + target_prefix_sums[-1] - 1 - max(target_consumed + query_remaining, 0)
else:
result = max(target_consumed + query_remaining, 0)
return result
def __hash__(self):
return hash(self.alignment)
@dataclass(frozen=True, eq=True)
class TransformationEntry(object):
"""
Simple holder class for the sequence name and coordinate for/of transformation
Allows for nice data encapsulation, while not eating almost any extra space, because of the __slots__ usage
"""
__slots__ = ('seq_name', 'coordinate')
seq_name: str
coordinate: int
@dataclass(frozen=True, eq=True)
class TransformedResult(TransformationEntry):
"""
Marker class to allow for type safety, if desired
"""
@dataclass(frozen=True, eq=True)
class TransformationQuery(TransformationEntry):
"""
Marker class to allow for type safety, if desired
"""
@dataclass
class CManager(object):
""" Main class that manages storage of multiple alignments (one alignment per query) and allows for efficient coordinate transformation from query coordinate system
to that of the alignment target one
Outsources the actual computations to the CMapper class, that is created for every added alignment
Keeps track of added alignments so as to when an identical alignment is added no recomputation is going to be performed
First query for a given alignment takes O(m) + O(log(m)), where m is the number of operation in the alignment CIGAR string
Subsequent queries of previously UNqueried values take O(log(m))
Queries are cached, so subsequent queries of previously queried values take O(1)
Examples:
# setup
>>> m = CManager()
>>> m.add_alignment(Alignment("TR1", "CHR1", 3, "8M7D6M2I2M11D7M"))
>>> m.add_alignment(Alignment("TR2", "CHR2", 10, "20M"))
# quering
>>> m.transform_coordinate("TR1", 4)
TransformedCoordinate(seq_name="CHR1", coordinate=7)
>>> m.transform_coordinate("TR2", 0)
TransformedCoordinate(seq_name="CHR2", coordinate=10)
"""
alignments_by_query_ids: Dict[str, CMapper] = field(default_factory=lambda: {})
def add_alignment(self, alignment: Alignment):
"""
Wrapper method that adds a new Alignment instance to the internal structure of the Manager, and wraps the alignment object into CMapper object
When addition of a duplicate alignment is attempted, no action is performed, thus keeping the potentially computed coordinate transformation data structures intact
Args:
alignment (Alignment): an instance of an Alignment class
"""
if alignment.query_name in self.alignments_by_query_ids:
mapper = self.alignments_by_query_ids[alignment.query_name]
if mapper.alignment != alignment:
self.alignments_by_query_ids[alignment.query_name].alignment = alignment
else:
self.alignments_by_query_ids[alignment.query_name] = CMapper(alignment)
def transform_coordinate(self, query_name: str, source_coordinate: int) -> TransformedResult:
""" The main computational method for coordinate transformation for a given position in a specified query
On its own just checks if the mapper for a given query exists and then outsources the actual new coordinate value computation to it
Caching is implemented at the level of a mapper, and not here, as with addition of (new) alignments only parts of cache would ideally be invalidated (for respective query),
but this is not feasible with basic lru_cache, though at the level of the mapper this is exactly what is implemented
Args:
query_name (str): name of the query for which alignment the coordinate transformation is going to take place
            source_coordinate (int): a coordinate on the specified query, which is going to be transformed into the alignment target coordinate system
Returns:
            transformed coordinate (TransformedResult): a dataclass (akin to a pair tuple) of the name of the target sequence,
and a transformed query coordinate in target coordinate system
Raises:
ValueError: if the supplied query does not have an alignment, or if the coordinate transformation fails (propagated from CMapper.transform_coordinate method)
"""
if query_name not in self.alignments_by_query_ids:
raise ValueError(f"Attempted to transform coordinate {source_coordinate} for query '{query_name}', but no alignments for '{query_name}' exist")
mapper: CMapper = self.alignments_by_query_ids[query_name]
target_seq_name: str = mapper.alignment.target_name
result_coordinate: int = mapper.transform_coordinate(source_coordinate)
return TransformedResult(target_seq_name, result_coordinate)
| 54.528455
| 187
| 0.701009
|
9e041e90580f0bc6179dc9e3834c50735ff3a40d
| 20,029
|
py
|
Python
|
open_spiel/python/egt/visualization.py
|
awesome-archive/open_spiel
|
4cff68f4f305ab8d158c3721648f8286b89d1546
|
[
"Apache-2.0"
] | 1
|
2019-08-30T01:56:30.000Z
|
2019-08-30T01:56:30.000Z
|
open_spiel/python/egt/visualization.py
|
awesome-archive/open_spiel
|
4cff68f4f305ab8d158c3721648f8286b89d1546
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/egt/visualization.py
|
awesome-archive/open_spiel
|
4cff68f4f305ab8d158c3721648f8286b89d1546
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization for single/multi-population dynamics in normal-form games.
Example:
game = pyspiel.load_game("matrix_pd")
payoff_tensor = utils.nfg_to_ndarray(game)
dyn = dynamics.MultiPopulationDynamics(payoff_tensor, dynamics.replicator)
ax = plt.subplot(projection="2x2")
ax.quiver(dyn)
"""
from absl import logging
# pylint: disable=g-import-not-at-top
try:
from matplotlib import axes
from matplotlib import projections
from matplotlib import transforms
from matplotlib import font_manager
from matplotlib import rcParams
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.patches import FancyArrowPatch
from matplotlib.collections import LineCollection
import matplotlib.cm
import matplotlib.colors
except ImportError as e:
logging.info("If your tests failed with the error 'ImportError: No module "
"named functools_lru_cache', this is a known bug in matplotlib "
"and there is a workaround (run sudo apt install "
"python-backports.functools-lru-cache. See: "
"https://github.com/matplotlib/matplotlib/issues/9344.")
raise ImportError(str(e))
import numpy as np
from open_spiel.python.egt import utils
def _eval_dynamics_2x2_grid(dynamics, num_points):
"""Evaluates dynamics on a 2-D mesh-grid.
Args:
dynamics: Population dynamics of type `dynamics.MultiPopulationDynamics`.
num_points: Number of points along each dimension of the grid.
Returns:
Mesh-grid (x, y) and corresponding derivatives of the first action for
player 1 and 2 (u, v).
"""
assert dynamics.payoff_tensor.shape == (2, 2, 2)
x = np.linspace(0., 1., num_points + 2)[1:-1]
x, y = np.meshgrid(x, x)
u = np.empty(x.shape)
v = np.empty(x.shape)
for i in range(num_points):
for j in range(num_points):
row_state = np.array([x[i, j], 1. - x[i, j]])
col_state = np.array([y[i, j], 1. - y[i, j]])
state = np.concatenate((row_state, col_state))
dstate = dynamics(state)
u[i][j] = dstate[0]
v[i][j] = dstate[2]
return x, y, u, v
def _rk12_step(func, y0, dt):
"""Improved Euler-Integration step to integrate dynamics.
Args:
func: Function handle to time derivative.
y0: Current state.
dt: Integration step.
Returns:
Next state.
"""
dy = func(y0)
y_ = y0 + dt * dy
return y0 + dt / 2. * (dy + func(y_))
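def _example_integrate_trajectory(func, y0, dt=0.01, steps=100):
  """Illustrative sketch (not part of the original module): integrates `func`
  from state `y0` by repeated `_rk12_step` calls; the step size and number of
  steps are arbitrary assumptions for demonstration only."""
  points = [np.array(y0)]
  for _ in range(steps):
    points.append(_rk12_step(func, points[-1], dt))
  return np.stack(points)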
class Dynamics2x2Axes(axes.Axes):
"""Axes for 2x2 game dynamics.
This class provides plotting functions for dynamics in two-player 2x2 games.
Attributes:
name: Used for projection keyword when creating a new axes.
"""
name = "2x2"
def cla(self):
"""Clear the current axes."""
super(Dynamics2x2Axes, self).cla()
self.set_aspect("equal")
self.set_xlim(0, 1)
self.set_ylim(0, 1)
def quiver(self,
dynamics,
num_points=9,
normalize=False,
pivot="middle",
**kwargs):
"""Visualizes the dynamics as a directional field plot.
Args:
dynamics: Population dynamics of type `dynamics.MultiPopulationDynamics`.
num_points: Number of points along each dimension of the plot.
normalize: Normalize each arrow to unit-length.
pivot: In `{"tail", "middle", "tip"}`, optional, default: "middle". The
part of the arrow that is anchored to the X, Y grid. The arrow rotates
about this point.
**kwargs: Additional keyword arguments passed on to `Axes.quiver`.
Returns:
The `quiver.Quiver` object created by calling `Axes.quiver`.
"""
x, y, u, v = _eval_dynamics_2x2_grid(dynamics, num_points)
if normalize:
norm = np.sqrt(u**2 + v**2)
u = np.divide(u, norm, out=np.zeros_like(u), where=norm != 0)
v = np.divide(v, norm, out=np.zeros_like(v), where=norm != 0)
return super(Dynamics2x2Axes, self).quiver(
x, y, u, v, pivot=pivot, **kwargs)
def streamplot(self,
dynamics,
num_points=50,
linewidth=None,
color=None,
**kwargs):
"""Visualizes the dynamics as a streamline plot.
Args:
dynamics: Population dynamics of type `dynamics.MultiPopulationDynamics`.
num_points: Number of points along each dimension of the plot.
linewidth: In `{None, float, "velocity"}`, optional, default: None. If
`linewidth="velocity"`, line width is scaled by the velocity of the
dynamics. Defaults to `rcParams` if `linewidth=None`.
color: In `{None, string, (r,g,b), (r,g,b,a), "velocity"}`, default: None.
If `color="velocity"`, velocity of dynamics is used to color the
streamlines. Defaults to `rcParams` if `color=None`.
**kwargs: Additional keyword arguments passed on to `Axes.streamplot`.
Returns:
The `streamplot.StreamplotSet` created by calling `Axes.streamplot`.
"""
x, y, u, v = _eval_dynamics_2x2_grid(dynamics, num_points)
if linewidth == "velocity" or color == "velocity":
vel = np.sqrt(u**2 + v**2)
vel = vel - np.min(vel)
vel = vel / np.max(vel)
if linewidth == "velocity":
linewidth = 3. * vel
if color == "velocity":
color = vel
return super(Dynamics2x2Axes, self).streamplot(
x, y, u, v, minlength=0.1, linewidth=linewidth, color=color, **kwargs)
projections.register_projection(Dynamics2x2Axes)
class SimplexTransform(transforms.Transform):
"""Affine transform to project the 2-simplex to 2D Cartesian space."""
input_dims = 3
output_dims = 2
_MATRIX = np.array([[0., 0.], [1., 0.], [0.5, np.sqrt(3) / 2.]])
def transform_affine(self, values):
return np.matmul(values, SimplexTransform._MATRIX)
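# Worked example (added comment): the simplex vertices (1,0,0), (0,1,0) and (0,0,1)
# map to the Cartesian corners (0,0), (1,0) and (0.5, sqrt(3)/2) of an equilateral
# triangle, and the barycentre (1/3,1/3,1/3) maps to its centroid (0.5, sqrt(3)/6).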
class SimplexStreamMask(object):
"""Mask of regular discrete cells to track trajectories/streamlines.
Also see `matplotlib.streamplot.StreamMask`.
"""
def __init__(self, density=1.):
    self._n = int(30. * density)
    self._mask = np.zeros([self._n + 1] * 2 + [2], dtype=bool)
self.shape = self._mask.shape
def index(self, point):
"""Computes index given a point on the simplex."""
point = np.array(point)
idx = np.floor(point[:2] * self._n).astype(int)
x, y = point[:2] * self._n - idx
z = int(x + y > 1)
return tuple(idx.tolist() + [z])
def point(self, index):
"""Computes point on the simplex given an index."""
p = np.empty((3,))
p[0] = (index[0] + (1 + index[2]) / 3.) / float(self._n)
p[1] = (index[1] + (1 + index[2]) / 3.) / float(self._n)
p[2] = 1. - p[0] - p[1]
return p if p[2] > 0. else None
def __getitem__(self, point):
return self._mask.__getitem__(self.index(point))
def __setitem__(self, point, val):
return self._mask.__setitem__(self.index(point), val)
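# Sketch of the mask bookkeeping (illustrative values, density=1. so
# self._n == 30): `index` maps a simplex point to a discrete cell and
# `point` maps a cell back to a representative point inside it.
#
#   m = SimplexStreamMask(density=1.)
#   idx = m.index((0.2, 0.3, 0.5))   # -> (6, 9, 0)
#   m.point(idx)                     # -> approx. (0.211, 0.311, 0.478)
#   m[(0.2, 0.3, 0.5)] = True        # mark the cell as visited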
class Dynamics3x3Axes(axes.Axes):
"""Axes for 3x3 game dynamics.
This class provides plotting functions for dynamics in symmetric 3x3 games.
Attributes:
name: Used for projection keyword when creating a new axes.
"""
name = "3x3"
_VERTICES = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
def __init__(self, fig, rect, *args, **kwargs):
self._simplex_transform = SimplexTransform()
self._labels = None
super(axes.Axes, self).__init__(fig, rect, *args, **kwargs)
def cla(self):
"""Clear the current axes."""
super(axes.Axes, self).cla()
self.set_aspect("equal")
self.get_xaxis().set_visible(False)
self.get_yaxis().set_visible(False)
self.patch.set_visible(False)
self.set_frame_on(False)
    # draw invisible vertices to set x/y limits of plot
self.scatter(Dynamics3x3Axes._VERTICES, alpha=0.)
self.margins(0.15)
self.bgpatch = self._create_bgpatch(
facecolor=rcParams["axes.facecolor"],
edgecolor=rcParams["axes.edgecolor"],
linewidth=rcParams["axes.linewidth"],
zorder=-1)
self.add_artist(self.bgpatch)
if rcParams["axes.grid"]:
self.grid = self._create_grid(
color=rcParams["grid.color"],
alpha=rcParams["grid.alpha"],
linestyle=rcParams["grid.linestyle"],
linewidth=rcParams["grid.linewidth"],
zorder=0)
self.add_collection(self.grid)
self.ticks, self.tick_labels = self._create_ticks(
color=rcParams["xtick.color"], zorder=0)
self.add_collection(self.ticks)
for label in self.tick_labels:
self.add_artist(label)
def _create_bgpatch(self, **kwargs):
codes = [Path.MOVETO] + [Path.LINETO] * 2 + [Path.CLOSEPOLY]
vertices = self._VERTICES + [self._VERTICES[0]]
vertices = self._simplex_transform.transform(np.array(vertices))
return PathPatch(Path(vertices, codes), **kwargs)
def _create_grid(self, step=0.2, **kwargs):
x = np.arange(step, 1., step)
n = x.shape[0]
line_start, line_end = np.zeros((n, 3)), np.zeros((n, 3))
line_start[:, 0] = line_end[::-1, 1] = x
line_start[:, 2] = line_end[::-1, 0] = 1. - x
segs = np.zeros((3 * n, 2, 2))
for i, perm in enumerate([(0, 2, 1), (1, 0, 2), (2, 1, 0)]):
start = self._simplex_transform.transform(line_start[:, perm])
end = self._simplex_transform.transform(line_end[:, perm])
segs[i * n:(i + 1) * n, 0, :], segs[i * n:(i + 1) * n, 1, :] = start, end
line_segments = LineCollection(segs, **kwargs)
return line_segments
def _create_ticks(self, step=0.2, tick_length=0.025, **kwargs):
x = np.arange(step, 1., step)
n = x.shape[0]
tick_start, tick_end = np.zeros((n, 3)), np.zeros((n, 3))
tick_start[:, 0] = x
tick_start[:, 2] = 1. - x
tick_end[:, 0] = x
tick_end[:, 2] = 1. - x + tick_length
tick_end[:, 1] = -tick_length
tick_labels = []
ha = ["center", "left", "right"]
va = ["top", "bottom", "center"]
rot = [-60, 60, 0]
segs = np.zeros((n * 3, 2, 2))
for i, perm in enumerate([(0, 2, 1), (1, 0, 2), (2, 1, 0)]):
start = self._simplex_transform.transform(tick_start[:, perm])
end = self._simplex_transform.transform(tick_end[:, perm])
segs[i * n:(i + 1) * n, 0, :], segs[i * n:(i + 1) * n, 1, :] = start, end
for j, x_ in enumerate(x):
tick_labels.append(
Text(
end[j, 0],
end[j, 1],
"{0:.1f}".format(x_),
horizontalalignment=ha[i],
verticalalignment=va[i],
rotation=rot[i],
color=kwargs["color"],
fontsize=rcParams["xtick.labelsize"]))
line_segments = LineCollection(segs, **kwargs)
return line_segments, tick_labels
def _create_labels(self, labels, padding):
artists = []
aligns = ["top", "top", "bottom"]
for label, pos, align in zip(labels, self._VERTICES, aligns):
x, y = self._simplex_transform.transform(pos)
labelpad = padding if align == "bottom" else -padding
label = Text(
x=x,
y=y + labelpad,
text=label,
fontproperties=font_manager.FontProperties(
size=rcParams["axes.labelsize"],
weight=rcParams["axes.labelweight"]),
color=rcParams["axes.labelcolor"],
verticalalignment=align,
horizontalalignment="center")
artists.append(label)
return artists
def get_labels(self):
return self._labels
def set_labels(self, labels, padding=0.02):
assert len(labels) == 3
if self._labels is None:
self._labels = self._create_labels(labels, padding)
for label in self._labels:
self.add_artist(label)
else:
for artist, label in zip(self._labels, labels):
artist.set_text(label)
labels = property(get_labels, set_labels)
def can_zoom(self):
return False
def can_pan(self):
return False
def plot(self, points, **kwargs):
"""Creates a line plot.
Args:
points: Points in policy space.
**kwargs: Additional keyword arguments passed on to `Axes.plot`.
"""
points = np.array(points)
assert points.shape[1] == 3
points = self._simplex_transform.transform(points)
return super(Dynamics3x3Axes, self).plot(points[:, 0], points[:, 1],
**kwargs)
def scatter(self, points, **kwargs):
"""Creates a scatter plot.
Args:
points: Points in policy space.
**kwargs: Additional keyword arguments passed on to `Axes.scatter`.
"""
points = np.array(points)
assert points.shape[1] == 3
points = self._simplex_transform.transform(points)
return super(Dynamics3x3Axes, self).scatter(points[:, 0], points[:, 1],
**kwargs)
def quiver(self,
dynamics,
step=0.05,
boundary=False,
normalize=False,
pivot="middle",
**kwargs):
"""Visualizes the dynamics as a directional field plot.
Args:
dynamics: Population dynamics of type `dynamics.SinglePopulationDynamics`.
step: Distance between arrows along one dimension.
boundary: Include arrows on the boundary/face of the simplex.
normalize: Normalize each arrow to unit-length.
pivot: In `{"tail", "middle", "tip"}`, optional, default: "middle". The
part of the arrow that is anchored to the X, Y grid. The arrow rotates
about this point.
**kwargs: Additional keyword arguments passed on to `Axes.quiver`.
Returns:
The `quiver.Quiver` object created by calling `Axes.quiver`.
"""
x = np.array([x for x in utils.grid_simplex(step=step, boundary=boundary)])
dx = np.apply_along_axis(dynamics, 1, x)
p = self._simplex_transform.transform(x)
v = self._simplex_transform.transform(dx)
x, y = p[:, 0], p[:, 1]
u, v = v[:, 0], v[:, 1]
if normalize:
norm = np.sqrt(u**2 + v**2)
u, v = u / norm, v / norm
if "pivot" not in kwargs:
kwargs["pivot"] = "middle"
return super(Dynamics3x3Axes, self).quiver(x, y, u, v, **kwargs)
def _linecollection(self, points, linewidth, color):
points = self._simplex_transform.transform(points).reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, linewidths=linewidth, color=color)
return lc
def _integrate(self, x, func, mask, dt, min_dist=0.01):
cells = []
trajectory = [x]
x_ = x
for dt in [dt, -dt]:
while not mask[x]:
cell = mask.index(x)
cells.append(cell)
while mask.index(x) == cell:
# integrate up to cell boundary
if np.sqrt(np.sum((x_ - x)**2)) > min_dist:
x_ = x
if dt > 0:
trajectory.append(x)
else:
trajectory.insert(0, x)
x = _rk12_step(func, x, dt=dt)
if dt > 0:
mask[trajectory[-1]] = True
else:
mask[trajectory[0]] = True
# restore to integrate backwards
if dt > 0. and len(cells):
trajectory.append(_rk12_step(func, x, dt=-dt))
mask[mask.point(cells[0])] = False
x = trajectory[0]
x_ = x
else:
trajectory.insert(0, _rk12_step(func, x, dt=-dt))
return (np.array(trajectory), cells) if len(trajectory) > 2 else None
def streamplot(self,
dynamics,
initial_points=None,
dt=0.01,
density=1.,
min_length=0.4,
linewidth=None,
color="k",
**kwargs):
"""Visualizes the dynamics as a streamline plot.
    Mimics the visuals of `Axes.streamplot` for simplex plots.
Args:
dynamics: Population dynamics of type `dynamics.SinglePopulationDynamics`.
      initial_points: Starting points for streamlines.
dt: Integration step.
density: Controls the density of streamlines in the plot.
      min_length: Streamlines with length < min_length will be discarded.
linewidth: In `{None, float, "velocity"}`, optional, default: None. If
`linewidth="velocity"`, line width is scaled by the velocity of the
dynamics. Defaults to `rcParams` if `linewidth=None`.
color: In `{None, string, (r,g,b), (r,g,b,a), "velocity"}`, default: None.
If `color="velocity"`, velocity of dynamics is used to color the
streamlines. Defaults to `rcParams` if `color=None`.
**kwargs: Additional keyword arguments passed on to `Axes.streamplot`.
Returns:
The `SimplexStreamMask`.
"""
mask = SimplexStreamMask(density=density)
trajectories = []
if initial_points is None:
eps = 0.1
initial_points = np.array([[1. - eps, eps / 2., eps / 2.],
[eps / 2., 1. - eps, eps / 2.],
[eps / 2., eps / 2., 1. - eps]])
initial_points = np.vstack(
(initial_points, utils.sample_from_simplex(100)))
# TODO: add heuristic for initial points
else:
initial_points = np.array(initial_points)
assert initial_points.ndim == 2
assert initial_points.shape[1] == 3
# generate trajectories
for p in initial_points:
# center initial point on grid cell
p = mask.point(mask.index(p))
res = self._integrate(p, dynamics, mask, dt=dt)
if res is not None:
t, cells = res
cum_len = np.cumsum(
np.sqrt(
np.diff(t[:, 0])**2 + np.diff(t[:, 1])**2 +
np.diff(t[:, 2])**2))
if cum_len[-1] < min_length:
for cell in cells:
mask[mask.point(cell)] = False
continue
trajectories.append(t)
lc_color = arrow_color = color
lc_linewidth = linewidth
if linewidth == "velocity" or color == "velocity":
vel_max = 0
vel_min = np.float("inf")
velocities = []
for t in trajectories:
dx = np.apply_along_axis(dynamics, 1, t)
vel = np.sqrt(np.sum(dx**2, axis=1))
vel_max = max(np.max(vel), vel_max)
vel_min = min(np.min(vel), vel_min)
velocities.append(vel)
# add trajectories to plot
for i, t in enumerate(trajectories):
cum_len = np.cumsum(
np.sqrt(
np.diff(t[:, 0])**2 + np.diff(t[:, 1])**2 + np.diff(t[:, 2])**2))
mid_idx = np.searchsorted(cum_len, cum_len[-1] / 2.)
if linewidth == "velocity" or color == "velocity":
vel = (velocities[i] - vel_min) / vel_max
if linewidth == "velocity":
lc_linewidth = 3. * vel + 0.5
if color == "velocity":
cmap = matplotlib.cm.get_cmap(rcParams["image.cmap"])
lc_color = cmap(vel)
arrow_color = cmap(vel[mid_idx])
lc = self._linecollection(t, linewidth=lc_linewidth, color=lc_color)
self.add_collection(lc)
# add arrow centered on trajectory
arrow_tail = self._simplex_transform.transform(t[mid_idx - 1])
arrow_head = self._simplex_transform.transform(t[mid_idx])
arrow_kw = dict(arrowstyle="-|>", mutation_scale=10 * 1.)
arrow_patch = FancyArrowPatch(
arrow_tail,
arrow_head,
linewidth=None,
color=arrow_color,
zorder=3,
**arrow_kw)
self.add_patch(arrow_patch)
return mask
projections.register_projection(Dynamics3x3Axes)
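# Example usage (sketch): with the "3x3" projection registered above,
# single-population dynamics on the 2-simplex are drawn the same way. The
# dynamics object is an assumption following the
# `dynamics.SinglePopulationDynamics` interface named in the docstrings.
#
#   from matplotlib import pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection="3x3")
#   ax.set_labels(["Rock", "Paper", "Scissors"])
#   # ax.quiver(dyn)                        # directional field on the simplex
#   # ax.streamplot(dyn, color="velocity")  # streamlines colored by speed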
| 33.160596
| 80
| 0.61396
|
492ed9b98a31ccd2e738a01f499b9f4747676a35
| 11,725
|
py
|
Python
|
experiments/bst_dataset.py
|
shengwenbo/CopyNet
|
013508d10ad5ed09514b233a75e7f41ce7f8fa94
|
[
"MIT"
] | 193
|
2016-09-19T02:55:00.000Z
|
2022-03-10T13:43:50.000Z
|
experiments/bst_dataset.py
|
shengwenbo/CopyNet
|
013508d10ad5ed09514b233a75e7f41ce7f8fa94
|
[
"MIT"
] | 6
|
2016-11-25T03:38:23.000Z
|
2019-03-30T06:11:31.000Z
|
experiments/bst_dataset.py
|
shengwenbo/CopyNet
|
013508d10ad5ed09514b233a75e7f41ce7f8fa94
|
[
"MIT"
] | 77
|
2016-10-14T10:08:44.000Z
|
2021-03-11T05:01:27.000Z
|
# coding=utf-8
__author__ = 'jiataogu'
from emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file
import numpy.random as n_rng
class BSTnode(object):
"""
Representation of a node in a binary search tree.
Has a left child, right child, and key value, and stores its subtree size.
"""
def __init__(self, parent, t):
"""Create a new leaf with key t."""
self.key = t
self.parent = parent
self.left = None
self.right = None
self.size = 1
def update_stats(self):
"""Updates this node's size based on its children's sizes."""
self.size = (0 if self.left is None else self.left.size) + (0 if self.right is None else self.right.size)
def insert(self, t, NodeType):
"""Insert key t into the subtree rooted at this node (updating subtree size)."""
self.size += 1
if t < self.key:
if self.left is None:
self.left = NodeType(self, t)
return self.left
else:
return self.left.insert(t, NodeType)
elif t > self.key:
if self.right is None:
self.right = NodeType(self, t)
return self.right
else:
return self.right.insert(t, NodeType)
else:
return self
def find(self, t):
"""Return the node for key t if it is in this tree, or None otherwise."""
if t == self.key:
return self
elif t < self.key:
if self.left is None:
return None
else:
return self.left.find(t)
else:
if self.right is None:
return None
else:
return self.right.find(t)
def rank(self, t):
"""Return the number of keys <= t in the subtree rooted at this node."""
left_size = 0 if self.left is None else self.left.size
if t == self.key:
return left_size + 1
elif t < self.key:
if self.left is None:
return 0
else:
return self.left.rank(t)
else:
if self.right is None:
return left_size + 1
else:
return self.right.rank(t) + left_size + 1
def minimum(self):
"""Returns the node with the smallest key in the subtree rooted by this node."""
current = self
while current.left is not None:
current = current.left
return current
def successor(self):
"""Returns the node with the smallest key larger than this node's key, or None if this has the largest key in the tree."""
if self.right is not None:
return self.right.minimum()
current = self
while current.parent is not None and current.parent.right is current:
current = current.parent
return current.parent
def delete(self):
""""Delete this node from the tree."""
if self.left is None or self.right is None:
if self is self.parent.left:
self.parent.left = self.left or self.right
if self.parent.left is not None:
self.parent.left.parent = self.parent
else:
self.parent.right = self.left or self.right
if self.parent.right is not None:
self.parent.right.parent = self.parent
current = self.parent
while current.key is not None:
current.update_stats()
current = current.parent
return self
else:
s = self.successor()
self.key, s.key = s.key, self.key
return s.delete()
def check(self, lokey, hikey):
"""Checks that the subtree rooted at t is a valid BST and all keys are between (lokey, hikey)."""
        if lokey is not None and self.key <= lokey:
            raise RuntimeError("BST RI violation")
        if hikey is not None and self.key >= hikey:
            raise RuntimeError("BST RI violation")
        if self.left is not None:
            if self.left.parent is not self:
                raise RuntimeError("BST RI violation")
            self.left.check(lokey, self.key)
        if self.right is not None:
            if self.right.parent is not self:
                raise RuntimeError("BST RI violation")
            self.right.check(self.key, hikey)
        if self.size != 1 + (0 if self.left is None else self.left.size) + (0 if self.right is None else self.right.size):
            raise RuntimeError("BST RI violation")
def __repr__(self):
return "<BST Node, key:" + str(self.key) + ">"
class BST(object):
"""
Simple binary search tree implementation, augmented with subtree sizes.
    This BST supports insert, find, rank, and delete operations.
Each tree contains some (possibly 0) BSTnode objects, representing nodes,
and a pointer to the root.
"""
def __init__(self, NodeType=BSTnode):
self.root = None
self.NodeType = NodeType
self.psroot = self.NodeType(None, None)
def reroot(self):
self.root = self.psroot.left
def insert(self, t):
"""Insert key t into this BST, modifying it in-place."""
if self.root is None:
self.psroot.left = self.NodeType(self.psroot, t)
self.reroot()
return self.root
else:
return self.root.insert(t, self.NodeType)
def find(self, t):
"""Return the node for key t if is in the tree, or None otherwise."""
if self.root is None:
return None
else:
return self.root.find(t)
def rank(self, t):
"""The number of keys <= t in the tree."""
if self.root is None:
return 0
else:
return self.root.rank(t)
    def delete(self, t):
        """Delete the node for key t if it is in the tree."""
        node = self.find(t)
        if node is None:
            return None
        deleted = node.delete()
        self.reroot()
        return deleted
def check(self):
if self.root is not None:
self.root.check(None, None)
def __str__(self):
if self.root is None:
return '<empty tree>'
def nested(node):
if node is None:
return '0'
head = str(node.key)
left = nested(node.left)
right = nested(node.right)
if left == '0' and right == '0':
return head
else:
return ' '.join(['(', head, left, right, ')'])
return nested(self.root)
# def recurse(node):
# if node is None:
# return [], 0, 0
# label = str(node.key)
# left_lines, left_pos, left_width = recurse(node.left)
# right_lines, right_pos, right_width = recurse(node.right)
# middle = max(right_pos + left_width - left_pos + 1, len(label), 2)
# pos = left_pos + middle // 2
# width = left_pos + middle + right_width - right_pos
# while len(left_lines) < len(right_lines):
# left_lines.append(' ' * left_width)
# while len(right_lines) < len(left_lines):
# right_lines.append(' ' * right_width)
# if (middle - len(label)) % 2 == 1 and node.parent is not None and \
# node is node.parent.left and len(label) < middle:
# label += '.'
# label = label.center(middle, '.')
# if label[0] == '.': label = ' ' + label[1:]
# if label[-1] == '.': label = label[:-1] + ' '
# lines = [' ' * left_pos + label + ' ' * (right_width - right_pos),
# ' ' * left_pos + '/' + ' ' * (middle-2) +
# '\\' + ' ' * (right_width - right_pos)] + \
# [left_line + ' ' * (width - left_width - right_width) +
# right_line
# for left_line, right_line in zip(left_lines, right_lines)]
# return lines, pos, width
# return '\n'.join(recurse(self.root) [0])
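# Example (sketch): how the augmented BST above behaves on a tiny input;
# the results follow from the insert/rank/__str__ implementations above.
#
#   tree = BST()
#   for k in [5, 2, 8, 1]:
#       tree.insert(k)
#   tree.find(8)    # -> <BST Node, key:8>
#   tree.rank(5)    # -> 3 (keys <= 5 are 1, 2, 5)
#   str(tree)       # -> '( 5 ( 2 1 0 ) 8 )'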
test1 = range(0, 100, 10)
test2 = [31, 41, 59, 26, 53, 58, 97, 93, 23]
test3 = "algorithms"
def printsizes(node):
if node is None:
print "node is nil"
else:
print "node", node.key, "has a subtree of size", node.size
def test(args=None, BSTtype=BST):
import random, sys
random.seed(19920206)
if not args:
args = sys.argv[1:]
if not args:
print 'usage: %s <number-of-random-items | item item item ...>' % \
sys.argv[0]
sys.exit()
elif len(args) == 1:
items = (random.randrange(100) for i in xrange(int(args[0])))
else:
items = [int(i) for i in args]
tree = BSTtype()
source = []
for item in items:
tree.insert(item)
source += [str(item)]
print ' '.join(source)
print tree
def generate():
import random, sys
random.seed(19920206)
Lmin = 2 ** 2 - 1
Lmax = 2 ** 4 - 1
Xnum = 1000000
voc = 26
wfile = open('/home/thoma/Work/Dial-DRL/dataset/BST_1M.txt', 'w')
for id in xrange(Xnum):
tree = BST()
items = (random.randrange(voc) for i in
xrange(random.randint(Lmin, Lmax)))
source = []
for item in items:
item = chr(item + 65)
tree.insert(item)
source += [str(item)]
source = ' '.join(source)
target = str(tree)
line = '{0} -> {1}'.format(source, target)
wfile.write(line + '\n')
if id % 10000 == 0:
print id
def obtain_dataset():
rfile = open('/home/thoma/Work/Dial-DRL/dataset/BST_1M.txt', 'r')
line = rfile.readline()
word2idx = dict()
word2idx['<eol>'] = 0
word2idx['<unk>'] = 1
pairs = []
at = 2
lines = 0
while line:
lines += 1
line = line.strip()
source, target = line.split('->')
source = source.split()
target = target.split()
for w in source:
if w not in word2idx:
word2idx[w] = at
at += 1
for w in target:
if w not in word2idx:
word2idx[w] = at
at += 1
pairs.append((source, target))
if lines % 20000 == 0:
print lines
line = rfile.readline()
idx2word = dict()
for v, k in word2idx.items():
idx2word[k] = v
Lmax = len(idx2word)
print 'read dataset ok.'
print Lmax
for i in xrange(Lmax):
print idx2word[i]
def build_data(data):
instance = dict(text=[], summary=[], source=[], target=[], target_c=[])
for pair in data:
source, target = pair
A = [word2idx[w] for w in source]
B = [word2idx[w] for w in target]
# C = np.asarray([[w == l for w in source] for l in target], dtype='float32')
C = [0 if w not in source else source.index(w) + Lmax for w in target]
instance['text'] += [source]
instance['summary'] += [target]
instance['source'] += [A]
instance['target'] += [B]
# instance['cc_matrix'] += [C]
instance['target_c'] += [C]
print instance['target'][5000]
print instance['target_c'][5000]
return instance
train_set = build_data(pairs[100000:])
test_set = build_data(pairs[:100000])
serialize_to_file([train_set, test_set, idx2word, word2idx],
'/home/thoma/Work/Dial-DRL/dataset/BST_1M.data.pkl')
if __name__ == '__main__':
generate()
obtain_dataset()
| 32.751397
| 130
| 0.526994
|
de6841f12ec10aa5558a9656001452c965449fef
| 2,065
|
bzl
|
Python
|
test/starlark_tests/macos_ui_test_tests.bzl
|
LaudateCorpus1/rules_apple
|
f8b1da53a4b53af9c655a17a4e0cb86959c932d8
|
[
"Apache-2.0"
] | 2
|
2020-07-01T20:21:48.000Z
|
2021-04-28T21:28:49.000Z
|
test/starlark_tests/macos_ui_test_tests.bzl
|
LaudateCorpus1/rules_apple
|
f8b1da53a4b53af9c655a17a4e0cb86959c932d8
|
[
"Apache-2.0"
] | null | null | null |
test/starlark_tests/macos_ui_test_tests.bzl
|
LaudateCorpus1/rules_apple
|
f8b1da53a4b53af9c655a17a4e0cb86959c932d8
|
[
"Apache-2.0"
] | 2
|
2021-06-03T10:06:19.000Z
|
2022-02-02T14:23:53.000Z
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""macos_ui_test Starlark tests."""
load(
":rules/apple_verification_test.bzl",
"apple_verification_test",
)
load(
":rules/infoplist_contents_test.bzl",
"infoplist_contents_test",
)
def macos_ui_test_test_suite():
"""Test suite for macos_ui_test."""
name = "macos_ui_test"
apple_verification_test(
name = "{}_codesign_test".format(name),
build_type = "device",
target_under_test = "//test/starlark_tests/targets_under_test/macos:ui_test",
verifier_script = "verifier_scripts/codesign_verifier.sh",
)
infoplist_contents_test(
name = "{}_plist_test".format(name),
target_under_test = "//test/starlark_tests/targets_under_test/macos:ui_test",
expected_values = {
"BuildMachineOSBuild": "*",
"CFBundleExecutable": "ui_test",
"CFBundleIdentifier": "com.google.exampleTests",
"CFBundleName": "ui_test",
"CFBundleSupportedPlatforms:0": "MacOSX",
"DTCompiler": "com.apple.compilers.llvm.clang.1_0",
"DTPlatformBuild": "*",
"DTPlatformName": "macosx",
"DTPlatformVersion": "*",
"DTSDKBuild": "*",
"DTSDKName": "macosx*",
"DTXcode": "*",
"DTXcodeBuild": "*",
"LSMinimumSystemVersion": "10.10",
},
tags = [name],
)
native.test_suite(
name = name,
tags = [name],
)
| 32.777778
| 85
| 0.638257
|
034cfc5bb888e2dbbe7b9bb9421ae5a45836ec31
| 8,942
|
py
|
Python
|
shims/qpid-proton-python/src/amqp_types_test/Sender.py
|
gemmellr/qpid-interop-test
|
d6434c645637e3ecf2adab10baef7d1c51486746
|
[
"Apache-2.0"
] | null | null | null |
shims/qpid-proton-python/src/amqp_types_test/Sender.py
|
gemmellr/qpid-interop-test
|
d6434c645637e3ecf2adab10baef7d1c51486746
|
[
"Apache-2.0"
] | null | null | null |
shims/qpid-proton-python/src/amqp_types_test/Sender.py
|
gemmellr/qpid-interop-test
|
d6434c645637e3ecf2adab10baef7d1c51486746
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
AMQP type test sender shim for qpid-interop-test
"""
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import os.path
import signal
import struct
import sys
import traceback
import uuid
import proton
import proton.handlers
import proton.reactor
import _compat
class AmqpTypesTestSender(proton.handlers.MessagingHandler):
"""
Sender shim for AMQP types test
This shim receives the AMQP type and a list of test values. Each value is sent in a message body of the appropriate
AMQP type. There is no returned value.
"""
def __init__(self, broker_url, queue_name, amqp_type, test_value_list):
super(AmqpTypesTestSender, self).__init__()
self.broker_url = broker_url
self.queue_name = queue_name
self.amqp_type = amqp_type
self.test_value_list = test_value_list
self.sent = 0
self.confirmed = 0
self.total = len(test_value_list)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
def on_start(self, event):
"""Event callback for when the client starts"""
connection = event.container.connect(url=self.broker_url, sasl_enabled=False, reconnect=False)
event.container.create_sender(connection, target=self.queue_name)
def on_sendable(self, event):
"""Event callback for when send credit is received, allowing the sending of messages"""
if self.sent == 0:
for test_value in self.test_value_list:
if event.sender.credit:
message = self.create_message(test_value)
if message is not None:
event.sender.send(message)
self.sent += 1
else:
event.connection.close()
return
def create_message(self, test_value):
"""
Creates a single message with the test value translated from its string representation to the appropriate
AMQP value (set in self.amqp_type).
"""
return proton.Message(id=(self.sent+1), body=self.encode_amqp_type(self.amqp_type, test_value))
@staticmethod
def encode_amqp_type(amqp_type, test_value):
"""Encode an AMQP type from a stringified test_value"""
if amqp_type == 'null':
return None
if amqp_type == 'boolean':
return True if test_value == 'True' else False
if amqp_type == 'ubyte':
return proton.ubyte(int(test_value, 16))
if amqp_type == 'ushort':
return proton.ushort(int(test_value, 16))
if amqp_type == 'uint':
return proton.uint(int(test_value, 16))
if amqp_type == 'ulong':
return proton.ulong(int(test_value, 16))
if amqp_type == 'byte':
return proton.byte(int(test_value, 16))
if amqp_type == 'short':
return proton.short(int(test_value, 16))
if amqp_type == 'int':
return proton.int32(int(test_value, 16))
if amqp_type == 'long':
return _compat.str2long(test_value, 16)
if amqp_type == 'float':
return proton.float32(struct.unpack('!f', _compat.decode_hex(test_value[2:]))[0])
if amqp_type == 'double':
return struct.unpack('!d', _compat.decode_hex(test_value[2:]))[0]
if amqp_type == 'decimal32':
return proton.decimal32(int(test_value[2:], 16))
if amqp_type == 'decimal64':
return proton.decimal64(_compat.str2long(test_value[2:], 16))
if amqp_type == 'decimal128':
return proton.decimal128(_compat.decode_hex(test_value[2:]))
if amqp_type == 'char':
if len(test_value) == 1: # Format 'a'
return proton.char(test_value)
return proton.char(_compat.unichr(int(test_value, 16)))
if amqp_type == 'timestamp':
return proton.timestamp(int(test_value, 16))
if amqp_type == 'uuid':
return uuid.UUID(test_value)
if amqp_type == 'binary':
return test_value.encode('utf-8')
if amqp_type == 'string':
return _compat.unicode(test_value)
if amqp_type == 'symbol':
return proton.symbol(test_value)
if amqp_type == 'list':
return AmqpTypesTestSender.encode_amqp_list(test_value)
if amqp_type == 'map':
return AmqpTypesTestSender.encode_amqp_map(test_value)
if amqp_type == 'array':
#return AmqpTypesTestSender.encode_amqp_array(test_value)
print('send: Unsupported AMQP type "%s"' % amqp_type)
return None
print('send: Unknown AMQP type "%s"' % amqp_type)
return None
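    # Sketch of the string -> AMQP value mapping performed above
    # (illustrative inputs, not taken from an actual test run):
    #   encode_amqp_type('boolean', 'True')  -> True
    #   encode_amqp_type('uint', '0xff')     -> proton.uint(255)
    #   encode_amqp_type('char', 'a')        -> proton.char('a')
    #   encode_amqp_type('string', 'hello')  -> u'hello'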
@staticmethod
def encode_complex_amqp_element(test_element, make_hashable=False):
"""
Encode a single complex AMQP element (ie list or array member, map key or value)
A complex element may be one of:
str/unicode: 'amqp_type:amqp_value'
list: [...]
dict: {...}
"""
if _compat.IS_PY3:
is_string = isinstance(test_element, str)
else:
is_string = isinstance(test_element, unicode)
if is_string:
split_list = test_element.split(':', 1)
return AmqpTypesTestSender.encode_amqp_type(split_list[0], split_list[1])
if isinstance(test_element, list):
enc_list = AmqpTypesTestSender.encode_amqp_list(test_element)
if make_hashable:
return tuple(enc_list) # Convert list to tuple
return enc_list
if isinstance(test_element, dict):
enc_dict = AmqpTypesTestSender.encode_amqp_map(test_element)
if make_hashable:
return tuple(enc_dict.items()) # Convert to tuple of k,v pairs
return enc_dict
else:
print('Unexpected complex amqp element type: %s, value=%s' % (type(test_element), str(test_element)))
@staticmethod
def encode_amqp_list(test_value):
"""
Encode an AMQP list from the format [val1, val2, ...]
Each val is in the string format amqp_type:amqp_val_as_str
"""
val_list = []
for val in test_value:
val_list.append(AmqpTypesTestSender.encode_complex_amqp_element(val))
return val_list
@staticmethod
def encode_amqp_map(test_value):
"""Encode an AMQP map from the format {key1:val1, key2:val2, ...}"""
val_map = {}
for key, val in test_value.items():
encoded_key = AmqpTypesTestSender.encode_complex_amqp_element(key, True) # make keys hashable
encoded_val = AmqpTypesTestSender.encode_complex_amqp_element(val)
val_map[encoded_key] = encoded_val
return val_map
@staticmethod
def encode_amqp_array(test_value):
"""Encode an AMQP array"""
return test_value
def on_accepted(self, event):
"""Event callback for when a sent message is accepted by the broker"""
self.confirmed += 1
if self.confirmed == self.total:
event.connection.close()
def on_disconnected(self, event):
"""Event callback for when the broker disconnects with the client"""
self.sent = self.confirmed
def on_transport_error(self, event):
print('Sender: Broker not found at %s' % self.broker_url)
@staticmethod
def signal_handler(signal_number, _):
"""Signal handler"""
if signal_number in [signal.SIGTERM, signal.SIGINT]:
print('Sender: received signal %d, terminating' % signal_number)
sys.exit(1)
# --- main ---
# Args: 1: Broker address (ip-addr:port)
# 2: Queue name
# 3: AMQP type
# 4...n: Test value(s) as strings
try:
SENDER = AmqpTypesTestSender(sys.argv[1], sys.argv[2], sys.argv[3], json.loads(sys.argv[4]))
proton.reactor.Container(SENDER).run()
except KeyboardInterrupt:
pass
except Exception as exc:
print(os.path.basename(sys.argv[0]), 'EXCEPTION:', exc)
print(traceback.format_exc())
sys.exit(1)
| 38.709957
| 119
| 0.634981
|
945b04381e91a803c439ea317fd8a91a92485950
| 360
|
py
|
Python
|
host1/webapp1/serializers.py
|
muzudho/django-practice
|
1899b165ef4b04478ced87b784cda27e2a16ecf3
|
[
"MIT"
] | null | null | null |
host1/webapp1/serializers.py
|
muzudho/django-practice
|
1899b165ef4b04478ced87b784cda27e2a16ecf3
|
[
"MIT"
] | null | null | null |
host1/webapp1/serializers.py
|
muzudho/django-practice
|
1899b165ef4b04478ced87b784cda27e2a16ecf3
|
[
"MIT"
] | null | null | null |
# from django.contrib.auth.admin import User
from django.contrib.auth.models import User
from django_grpc_framework import proto_serializers
import account_pb2
class UserProtoSerializer(proto_serializers.ModelProtoSerializer):
class Meta:
model = User
proto_class = account_pb2.User
fields = ['id', 'username', 'email', 'groups']
| 30
| 66
| 0.75
|
1a29d148d9e311fc7efcebf8eea7ddc781d5ed53
| 36,168
|
py
|
Python
|
test/run_tests.py
|
smitlimbani/vpp
|
d5403ac610c985acc780e625b4328d001efd0e63
|
[
"Apache-2.0"
] | 1
|
2019-10-10T17:42:08.000Z
|
2019-10-10T17:42:08.000Z
|
test/run_tests.py
|
smitlimbani/vpp
|
d5403ac610c985acc780e625b4328d001efd0e63
|
[
"Apache-2.0"
] | null | null | null |
test/run_tests.py
|
smitlimbani/vpp
|
d5403ac610c985acc780e625b4328d001efd0e63
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import shutil
import os
import fnmatch
import unittest
import argparse
import time
import threading
import signal
import psutil
import re
import multiprocessing
from multiprocessing import Process, Pipe, cpu_count
from multiprocessing.queues import Queue
from multiprocessing.managers import BaseManager
import framework
from framework import VppTestRunner, running_extended_tests, VppTestCase, \
get_testcase_doc_name, get_test_description, PASS, FAIL, ERROR, SKIP, \
TEST_RUN
from debug import spawn_gdb
from log import get_parallel_logger, double_line_delim, RED, YELLOW, GREEN, \
colorize, single_line_delim
from discover_tests import discover_tests
from subprocess import check_output, CalledProcessError
from util import check_core_path, get_core_path, is_core_present
# timeout which controls how long the child has to finish after seeing
# a core dump in test temporary directory. If this is exceeded, parent assumes
# that child process is stuck (e.g. waiting for shm mutex, which will never
# get unlocked) and kill the child
core_timeout = 3
min_req_shm = 536870912 # min 512MB shm required
# 128MB per extra process
shm_per_process = 134217728
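# Worked example of the sizing above (illustrative numbers): with 2 GiB free
# in /dev/shm, the space beyond the 512 MiB minimum is 1536 MiB, allowing
# 1536 // 128 = 12 extra runners, i.e. up to 13 parallel test processes
# before the cpu-count cap in the TEST_JOBS=auto branch applies.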
class StreamQueue(Queue):
def write(self, msg):
self.put(msg)
def flush(self):
sys.__stdout__.flush()
sys.__stderr__.flush()
def fileno(self):
return self._writer.fileno()
class StreamQueueManager(BaseManager):
pass
StreamQueueManager.register('StreamQueue', StreamQueue)
class TestResult(dict):
def __init__(self, testcase_suite, testcases_by_id=None):
super(TestResult, self).__init__()
self[PASS] = []
self[FAIL] = []
self[ERROR] = []
self[SKIP] = []
self[TEST_RUN] = []
self.crashed = False
self.testcase_suite = testcase_suite
self.testcases = [testcase for testcase in testcase_suite]
self.testcases_by_id = testcases_by_id
def was_successful(self):
return 0 == len(self[FAIL]) == len(self[ERROR]) \
and len(self[PASS] + self[SKIP]) \
== self.testcase_suite.countTestCases() == len(self[TEST_RUN])
def no_tests_run(self):
return 0 == len(self[TEST_RUN])
def process_result(self, test_id, result):
self[result].append(test_id)
def suite_from_failed(self):
rerun_ids = set([])
for testcase in self.testcase_suite:
tc_id = testcase.id()
if tc_id not in self[PASS] and tc_id not in self[SKIP]:
rerun_ids.add(tc_id)
if rerun_ids:
return suite_from_failed(self.testcase_suite, rerun_ids)
def get_testcase_names(self, test_id):
# could be tearDownClass (test_ipsec_esp.TestIpsecEsp1)
setup_teardown_match = re.match(
r'((tearDownClass)|(setUpClass)) \((.+\..+)\)', test_id)
if setup_teardown_match:
test_name, _, _, testcase_name = setup_teardown_match.groups()
if len(testcase_name.split('.')) == 2:
for key in self.testcases_by_id.keys():
if key.startswith(testcase_name):
testcase_name = key
break
testcase_name = self._get_testcase_doc_name(testcase_name)
else:
test_name = self._get_test_description(test_id)
testcase_name = self._get_testcase_doc_name(test_id)
return testcase_name, test_name
def _get_test_description(self, test_id):
if test_id in self.testcases_by_id:
desc = get_test_description(descriptions,
self.testcases_by_id[test_id])
else:
desc = test_id
return desc
def _get_testcase_doc_name(self, test_id):
if test_id in self.testcases_by_id:
doc_name = get_testcase_doc_name(self.testcases_by_id[test_id])
else:
doc_name = test_id
return doc_name
def test_runner_wrapper(suite, keep_alive_pipe, stdouterr_queue,
finished_pipe, result_pipe, logger):
sys.stdout = stdouterr_queue
sys.stderr = stdouterr_queue
VppTestCase.parallel_handler = logger.handlers[0]
result = VppTestRunner(keep_alive_pipe=keep_alive_pipe,
descriptions=descriptions,
verbosity=verbose,
result_pipe=result_pipe,
failfast=failfast,
print_summary=False).run(suite)
finished_pipe.send(result.wasSuccessful())
finished_pipe.close()
keep_alive_pipe.close()
class TestCaseWrapper(object):
def __init__(self, testcase_suite, manager):
self.keep_alive_parent_end, self.keep_alive_child_end = Pipe(
duplex=False)
self.finished_parent_end, self.finished_child_end = Pipe(duplex=False)
self.result_parent_end, self.result_child_end = Pipe(duplex=False)
self.testcase_suite = testcase_suite
if sys.version[0] == '2':
self.stdouterr_queue = manager.StreamQueue()
else:
from multiprocessing import get_context
self.stdouterr_queue = manager.StreamQueue(ctx=get_context())
self.logger = get_parallel_logger(self.stdouterr_queue)
self.child = Process(target=test_runner_wrapper,
args=(testcase_suite,
self.keep_alive_child_end,
self.stdouterr_queue,
self.finished_child_end,
self.result_child_end,
self.logger)
)
self.child.start()
self.last_test_temp_dir = None
self.last_test_vpp_binary = None
self._last_test = None
self.last_test_id = None
self.vpp_pid = None
self.last_heard = time.time()
self.core_detected_at = None
self.testcases_by_id = {}
self.testclasess_with_core = {}
for testcase in self.testcase_suite:
self.testcases_by_id[testcase.id()] = testcase
self.result = TestResult(testcase_suite, self.testcases_by_id)
@property
def last_test(self):
return self._last_test
@last_test.setter
def last_test(self, test_id):
self.last_test_id = test_id
if test_id in self.testcases_by_id:
testcase = self.testcases_by_id[test_id]
self._last_test = testcase.shortDescription()
if not self._last_test:
self._last_test = str(testcase)
else:
self._last_test = test_id
def add_testclass_with_core(self):
if self.last_test_id in self.testcases_by_id:
test = self.testcases_by_id[self.last_test_id]
class_name = unittest.util.strclass(test.__class__)
test_name = "'{}' ({})".format(get_test_description(descriptions,
test),
self.last_test_id)
else:
test_name = self.last_test_id
class_name = re.match(r'((tearDownClass)|(setUpClass)) '
r'\((.+\..+)\)', test_name).groups()[3]
if class_name not in self.testclasess_with_core:
self.testclasess_with_core[class_name] = (
test_name,
self.last_test_vpp_binary,
self.last_test_temp_dir)
def close_pipes(self):
self.keep_alive_child_end.close()
self.finished_child_end.close()
self.result_child_end.close()
self.keep_alive_parent_end.close()
self.finished_parent_end.close()
self.result_parent_end.close()
def was_successful(self):
return self.result.was_successful()
def stdouterr_reader_wrapper(unread_testcases, finished_unread_testcases,
read_testcases):
read_testcase = None
while read_testcases.is_set() or unread_testcases:
if finished_unread_testcases:
read_testcase = finished_unread_testcases.pop()
unread_testcases.remove(read_testcase)
elif unread_testcases:
read_testcase = unread_testcases.pop()
if read_testcase:
data = ''
while data is not None:
sys.stdout.write(data)
data = read_testcase.stdouterr_queue.get()
read_testcase.stdouterr_queue.close()
finished_unread_testcases.discard(read_testcase)
read_testcase = None
def handle_failed_suite(logger, last_test_temp_dir, vpp_pid):
if last_test_temp_dir:
# Need to create link in case of a timeout or core dump without failure
lttd = os.path.basename(last_test_temp_dir)
failed_dir = os.getenv('FAILED_DIR')
link_path = '%s%s-FAILED' % (failed_dir, lttd)
if not os.path.exists(link_path):
os.symlink(last_test_temp_dir, link_path)
logger.error("Symlink to failed testcase directory: %s -> %s"
% (link_path, lttd))
# Report core existence
core_path = get_core_path(last_test_temp_dir)
if os.path.exists(core_path):
logger.error(
"Core-file exists in test temporary directory: %s!" %
core_path)
check_core_path(logger, core_path)
logger.debug("Running 'file %s':" % core_path)
try:
info = check_output(["file", core_path])
logger.debug(info)
except CalledProcessError as e:
logger.error("Subprocess returned with return code "
"while running `file' utility on core-file "
"returned: "
"rc=%s", e.returncode)
except OSError as e:
logger.error("Subprocess returned with OS error while "
"running 'file' utility "
"on core-file: "
"(%s) %s", e.errno, e.strerror)
except Exception as e:
logger.exception("Unexpected error running `file' utility "
"on core-file")
logger.error("gdb %s %s" %
(os.getenv('VPP_BIN', 'vpp'), core_path))
if vpp_pid:
# Copy api post mortem
api_post_mortem_path = "/tmp/api_post_mortem.%d" % vpp_pid
if os.path.isfile(api_post_mortem_path):
logger.error("Copying api_post_mortem.%d to %s" %
(vpp_pid, last_test_temp_dir))
shutil.copy2(api_post_mortem_path, last_test_temp_dir)
def check_and_handle_core(vpp_binary, tempdir, core_crash_test):
if is_core_present(tempdir):
if debug_core:
print('VPP core detected in %s. Last test running was %s' %
(tempdir, core_crash_test))
print(single_line_delim)
spawn_gdb(vpp_binary, get_core_path(tempdir))
print(single_line_delim)
elif compress_core:
print("Compressing core-file in test directory `%s'" % tempdir)
os.system("gzip %s" % get_core_path(tempdir))
def handle_cores(failed_testcases):
for failed_testcase in failed_testcases:
tcs_with_core = failed_testcase.testclasess_with_core
if tcs_with_core:
for test, vpp_binary, tempdir in tcs_with_core.values():
check_and_handle_core(vpp_binary, tempdir, test)
def process_finished_testsuite(wrapped_testcase_suite,
finished_testcase_suites,
failed_wrapped_testcases,
results):
results.append(wrapped_testcase_suite.result)
finished_testcase_suites.add(wrapped_testcase_suite)
stop_run = False
if failfast and not wrapped_testcase_suite.was_successful():
stop_run = True
if not wrapped_testcase_suite.was_successful():
failed_wrapped_testcases.add(wrapped_testcase_suite)
handle_failed_suite(wrapped_testcase_suite.logger,
wrapped_testcase_suite.last_test_temp_dir,
wrapped_testcase_suite.vpp_pid)
return stop_run
def run_forked(testcase_suites):
wrapped_testcase_suites = set()
solo_testcase_suites = []
total_test_runners = 0
# suites are unhashable, need to use list
results = []
unread_testcases = set()
finished_unread_testcases = set()
manager = StreamQueueManager()
manager.start()
total_test_runners = 0
while total_test_runners < concurrent_tests:
if testcase_suites:
a_suite = testcase_suites.pop(0)
if a_suite.is_tagged_run_solo:
solo_testcase_suites.append(a_suite)
continue
wrapped_testcase_suite = TestCaseWrapper(a_suite,
manager)
wrapped_testcase_suites.add(wrapped_testcase_suite)
unread_testcases.add(wrapped_testcase_suite)
total_test_runners = total_test_runners + 1
else:
break
while total_test_runners < 1 and solo_testcase_suites:
if solo_testcase_suites:
a_suite = solo_testcase_suites.pop(0)
wrapped_testcase_suite = TestCaseWrapper(a_suite,
manager)
wrapped_testcase_suites.add(wrapped_testcase_suite)
unread_testcases.add(wrapped_testcase_suite)
total_test_runners = total_test_runners + 1
else:
break
read_from_testcases = threading.Event()
read_from_testcases.set()
stdouterr_thread = threading.Thread(target=stdouterr_reader_wrapper,
args=(unread_testcases,
finished_unread_testcases,
read_from_testcases))
stdouterr_thread.start()
failed_wrapped_testcases = set()
stop_run = False
try:
while wrapped_testcase_suites:
finished_testcase_suites = set()
for wrapped_testcase_suite in wrapped_testcase_suites:
while wrapped_testcase_suite.result_parent_end.poll():
wrapped_testcase_suite.result.process_result(
*wrapped_testcase_suite.result_parent_end.recv())
wrapped_testcase_suite.last_heard = time.time()
while wrapped_testcase_suite.keep_alive_parent_end.poll():
wrapped_testcase_suite.last_test, \
wrapped_testcase_suite.last_test_vpp_binary, \
wrapped_testcase_suite.last_test_temp_dir, \
wrapped_testcase_suite.vpp_pid = \
wrapped_testcase_suite.keep_alive_parent_end.recv()
wrapped_testcase_suite.last_heard = time.time()
if wrapped_testcase_suite.finished_parent_end.poll():
wrapped_testcase_suite.finished_parent_end.recv()
wrapped_testcase_suite.last_heard = time.time()
stop_run = process_finished_testsuite(
wrapped_testcase_suite,
finished_testcase_suites,
failed_wrapped_testcases,
results) or stop_run
continue
fail = False
if wrapped_testcase_suite.last_heard + test_timeout < \
time.time():
fail = True
wrapped_testcase_suite.logger.critical(
"Child test runner process timed out "
"(last test running was `%s' in `%s')!" %
(wrapped_testcase_suite.last_test,
wrapped_testcase_suite.last_test_temp_dir))
elif not wrapped_testcase_suite.child.is_alive():
fail = True
wrapped_testcase_suite.logger.critical(
"Child test runner process unexpectedly died "
"(last test running was `%s' in `%s')!" %
(wrapped_testcase_suite.last_test,
wrapped_testcase_suite.last_test_temp_dir))
elif wrapped_testcase_suite.last_test_temp_dir and \
wrapped_testcase_suite.last_test_vpp_binary:
if is_core_present(
wrapped_testcase_suite.last_test_temp_dir):
wrapped_testcase_suite.add_testclass_with_core()
if wrapped_testcase_suite.core_detected_at is None:
wrapped_testcase_suite.core_detected_at = \
time.time()
elif wrapped_testcase_suite.core_detected_at + \
core_timeout < time.time():
wrapped_testcase_suite.logger.critical(
"Child test runner process unresponsive and "
"core-file exists in test temporary directory "
"(last test running was `%s' in `%s')!" %
(wrapped_testcase_suite.last_test,
wrapped_testcase_suite.last_test_temp_dir))
fail = True
if fail:
wrapped_testcase_suite.child.terminate()
try:
# terminating the child process tends to leave orphan
# VPP process around
if wrapped_testcase_suite.vpp_pid:
os.kill(wrapped_testcase_suite.vpp_pid,
signal.SIGTERM)
except OSError:
# already dead
pass
wrapped_testcase_suite.result.crashed = True
wrapped_testcase_suite.result.process_result(
wrapped_testcase_suite.last_test_id, ERROR)
stop_run = process_finished_testsuite(
wrapped_testcase_suite,
finished_testcase_suites,
failed_wrapped_testcases,
results) or stop_run
for finished_testcase in finished_testcase_suites:
# Somewhat surprisingly, the join below may
# timeout, even if client signaled that
# it finished - so we note it just in case.
join_start = time.time()
finished_testcase.child.join(test_finished_join_timeout)
join_end = time.time()
if join_end - join_start >= test_finished_join_timeout:
finished_testcase.logger.error(
"Timeout joining finished test: %s (pid %d)" %
(finished_testcase.last_test,
finished_testcase.child.pid))
finished_testcase.close_pipes()
wrapped_testcase_suites.remove(finished_testcase)
finished_unread_testcases.add(finished_testcase)
finished_testcase.stdouterr_queue.put(None)
total_test_runners = total_test_runners - 1
if stop_run:
while testcase_suites:
results.append(TestResult(testcase_suites.pop(0)))
elif testcase_suites:
a_testcase = testcase_suites.pop(0)
while a_testcase and a_testcase.is_tagged_run_solo:
solo_testcase_suites.append(a_testcase)
if testcase_suites:
a_testcase = testcase_suites.pop(0)
else:
a_testcase = None
if a_testcase:
new_testcase = TestCaseWrapper(a_testcase,
manager)
wrapped_testcase_suites.add(new_testcase)
total_test_runners = total_test_runners + 1
unread_testcases.add(new_testcase)
if solo_testcase_suites and total_test_runners == 0:
a_testcase = solo_testcase_suites.pop(0)
new_testcase = TestCaseWrapper(a_testcase,
manager)
wrapped_testcase_suites.add(new_testcase)
total_test_runners = total_test_runners + 1
unread_testcases.add(new_testcase)
time.sleep(0.1)
except Exception:
for wrapped_testcase_suite in wrapped_testcase_suites:
wrapped_testcase_suite.child.terminate()
wrapped_testcase_suite.stdouterr_queue.put(None)
raise
finally:
read_from_testcases.clear()
stdouterr_thread.join(test_timeout)
manager.shutdown()
handle_cores(failed_wrapped_testcases)
return results
class SplitToSuitesCallback:
def __init__(self, filter_callback):
self.suites = {}
self.suite_name = 'default'
self.filter_callback = filter_callback
self.filtered = unittest.TestSuite()
def __call__(self, file_name, cls, method):
test_method = cls(method)
if self.filter_callback(file_name, cls.__name__, method):
self.suite_name = file_name + cls.__name__
if self.suite_name not in self.suites:
self.suites[self.suite_name] = unittest.TestSuite()
self.suites[self.suite_name].is_tagged_run_solo = False
self.suites[self.suite_name].addTest(test_method)
if test_method.is_tagged_run_solo():
self.suites[self.suite_name].is_tagged_run_solo = True
else:
self.filtered.addTest(test_method)
test_option = "TEST"
def parse_test_option():
f = os.getenv(test_option, None)
filter_file_name = None
filter_class_name = None
filter_func_name = None
if f:
if '.' in f:
parts = f.split('.')
if len(parts) > 3:
raise Exception("Unrecognized %s option: %s" %
(test_option, f))
if len(parts) > 2:
if parts[2] not in ('*', ''):
filter_func_name = parts[2]
if parts[1] not in ('*', ''):
filter_class_name = parts[1]
if parts[0] not in ('*', ''):
if parts[0].startswith('test_'):
filter_file_name = parts[0]
else:
filter_file_name = 'test_%s' % parts[0]
else:
if f.startswith('test_'):
filter_file_name = f
else:
filter_file_name = 'test_%s' % f
if filter_file_name:
filter_file_name = '%s.py' % filter_file_name
return filter_file_name, filter_class_name, filter_func_name
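# Sketch of the TEST option formats handled above (hypothetical values):
#   TEST=ipsec                     -> ('test_ipsec.py', None, None)
#   TEST=ipsec.TestIpsec4          -> ('test_ipsec.py', 'TestIpsec4', None)
#   TEST=ipsec.TestIpsec4.test_tra -> ('test_ipsec.py', 'TestIpsec4', 'test_tra')
#   TEST=*.*.test_tra              -> (None, None, 'test_tra')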
def filter_tests(tests, filter_cb):
result = unittest.suite.TestSuite()
for t in tests:
if isinstance(t, unittest.suite.TestSuite):
# this is a bunch of tests, recursively filter...
x = filter_tests(t, filter_cb)
if x.countTestCases() > 0:
result.addTest(x)
elif isinstance(t, unittest.TestCase):
# this is a single test
parts = t.id().split('.')
# t.id() for common cases like this:
# test_classifier.TestClassifier.test_acl_ip
# apply filtering only if it is so
if len(parts) == 3:
if not filter_cb(parts[0], parts[1], parts[2]):
continue
result.addTest(t)
else:
# unexpected object, don't touch it
result.addTest(t)
return result
class FilterByTestOption:
def __init__(self, filter_file_name, filter_class_name, filter_func_name):
self.filter_file_name = filter_file_name
self.filter_class_name = filter_class_name
self.filter_func_name = filter_func_name
def __call__(self, file_name, class_name, func_name):
if self.filter_file_name:
fn_match = fnmatch.fnmatch(file_name, self.filter_file_name)
if not fn_match:
return False
if self.filter_class_name and class_name != self.filter_class_name:
return False
if self.filter_func_name and func_name != self.filter_func_name:
return False
return True
class FilterByClassList:
def __init__(self, classes_with_filenames):
self.classes_with_filenames = classes_with_filenames
def __call__(self, file_name, class_name, func_name):
return '.'.join([file_name, class_name]) in self.classes_with_filenames
def suite_from_failed(suite, failed):
failed = {x.rsplit('.', 1)[0] for x in failed}
filter_cb = FilterByClassList(failed)
suite = filter_tests(suite, filter_cb)
return suite
class AllResults(dict):
def __init__(self):
super(AllResults, self).__init__()
self.all_testcases = 0
self.results_per_suite = []
self[PASS] = 0
self[FAIL] = 0
self[ERROR] = 0
self[SKIP] = 0
self[TEST_RUN] = 0
self.rerun = []
self.testsuites_no_tests_run = []
def add_results(self, result):
self.results_per_suite.append(result)
result_types = [PASS, FAIL, ERROR, SKIP, TEST_RUN]
for result_type in result_types:
self[result_type] += len(result[result_type])
def add_result(self, result):
retval = 0
self.all_testcases += result.testcase_suite.countTestCases()
self.add_results(result)
if result.no_tests_run():
self.testsuites_no_tests_run.append(result.testcase_suite)
if result.crashed:
retval = -1
else:
retval = 1
elif not result.was_successful():
retval = 1
if retval != 0:
self.rerun.append(result.testcase_suite)
return retval
def print_results(self):
print('')
print(double_line_delim)
print('TEST RESULTS:')
print(' Scheduled tests: {}'.format(self.all_testcases))
print(' Executed tests: {}'.format(self[TEST_RUN]))
print(' Passed tests: {}'.format(
colorize(str(self[PASS]), GREEN)))
if self[SKIP] > 0:
print(' Skipped tests: {}'.format(
colorize(str(self[SKIP]), YELLOW)))
if self.not_executed > 0:
print(' Not Executed tests: {}'.format(
colorize(str(self.not_executed), RED)))
if self[FAIL] > 0:
print(' Failures: {}'.format(
colorize(str(self[FAIL]), RED)))
if self[ERROR] > 0:
print(' Errors: {}'.format(
colorize(str(self[ERROR]), RED)))
if self.all_failed > 0:
print('FAILURES AND ERRORS IN TESTS:')
for result in self.results_per_suite:
failed_testcase_ids = result[FAIL]
errored_testcase_ids = result[ERROR]
old_testcase_name = None
if failed_testcase_ids:
for failed_test_id in failed_testcase_ids:
new_testcase_name, test_name = \
result.get_testcase_names(failed_test_id)
if new_testcase_name != old_testcase_name:
print(' Testcase name: {}'.format(
colorize(new_testcase_name, RED)))
old_testcase_name = new_testcase_name
print(' FAILURE: {} [{}]'.format(
colorize(test_name, RED), failed_test_id))
if errored_testcase_ids:
for errored_test_id in errored_testcase_ids:
new_testcase_name, test_name = \
result.get_testcase_names(errored_test_id)
if new_testcase_name != old_testcase_name:
print(' Testcase name: {}'.format(
colorize(new_testcase_name, RED)))
old_testcase_name = new_testcase_name
print(' ERROR: {} [{}]'.format(
colorize(test_name, RED), errored_test_id))
if self.testsuites_no_tests_run:
print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
tc_classes = set()
for testsuite in self.testsuites_no_tests_run:
for testcase in testsuite:
tc_classes.add(get_testcase_doc_name(testcase))
for tc_class in tc_classes:
print(' {}'.format(colorize(tc_class, RED)))
print(double_line_delim)
print('')
@property
def not_executed(self):
return self.all_testcases - self[TEST_RUN]
@property
def all_failed(self):
return self[FAIL] + self[ERROR]
def parse_results(results):
"""
Prints the number of scheduled, executed, not executed, passed, failed,
errored and skipped tests and details about failed and errored tests.
Also returns all suites where any test failed.
:param results:
:return:
"""
results_per_suite = AllResults()
crashed = False
failed = False
for result in results:
result_code = results_per_suite.add_result(result)
if result_code == 1:
failed = True
elif result_code == -1:
crashed = True
results_per_suite.print_results()
if crashed:
return_code = -1
elif failed:
return_code = 1
else:
return_code = 0
return return_code, results_per_suite.rerun
def parse_digit_env(env_var, default):
value = os.getenv(env_var, default)
if value != default:
if value.isdigit():
value = int(value)
else:
print('WARNING: unsupported value "%s" for env var "%s",'
'defaulting to %s' % (value, env_var, default))
value = default
return value
if __name__ == '__main__':
verbose = parse_digit_env("V", 0)
test_timeout = parse_digit_env("TIMEOUT", 600) # default = 10 minutes
test_finished_join_timeout = 15
retries = parse_digit_env("RETRIES", 0)
debug = os.getenv("DEBUG", "n").lower() in ["gdb", "gdbserver"]
debug_core = os.getenv("DEBUG", "").lower() == "core"
compress_core = framework.BoolEnvironmentVariable("CORE_COMPRESS")
step = framework.BoolEnvironmentVariable("STEP")
force_foreground = framework.BoolEnvironmentVariable("FORCE_FOREGROUND")
run_interactive = debug or step or force_foreground
try:
num_cpus = len(os.sched_getaffinity(0))
except AttributeError:
num_cpus = multiprocessing.cpu_count()
shm_free = psutil.disk_usage('/dev/shm').free
print('OS reports %s available cpu(s). Free shm: %s' % (
num_cpus, "{:,}MB".format(shm_free / (1024 * 1024))))
test_jobs = os.getenv("TEST_JOBS", "1").lower() # default = 1 process
if test_jobs == 'auto':
if run_interactive:
concurrent_tests = 1
print('Interactive mode required, running on one core')
else:
shm_max_processes = 1
if shm_free < min_req_shm:
raise Exception('Not enough free space in /dev/shm. Required '
'free space is at least %sM.'
% (min_req_shm >> 20))
else:
extra_shm = shm_free - min_req_shm
shm_max_processes += extra_shm // shm_per_process
concurrent_tests = min(cpu_count(), shm_max_processes)
print('Found enough resources to run tests with %s cores'
% concurrent_tests)
elif test_jobs.isdigit():
concurrent_tests = int(test_jobs)
print("Running on %s core(s) as set by 'TEST_JOBS'." %
concurrent_tests)
else:
concurrent_tests = 1
print('Running on one core.')
if run_interactive and concurrent_tests > 1:
raise NotImplementedError(
'Running tests interactively (DEBUG is gdb or gdbserver or STEP '
'is set) in parallel (TEST_JOBS is more than 1) is not supported')
parser = argparse.ArgumentParser(description="VPP unit tests")
parser.add_argument("-f", "--failfast", action='store_true',
help="fast failure flag")
parser.add_argument("-d", "--dir", action='append', type=str,
help="directory containing test files "
"(may be specified multiple times)")
args = parser.parse_args()
failfast = args.failfast
descriptions = True
print("Running tests using custom test runner") # debug message
filter_file, filter_class, filter_func = parse_test_option()
print("Active filters: file=%s, class=%s, function=%s" % (
filter_file, filter_class, filter_func))
filter_cb = FilterByTestOption(filter_file, filter_class, filter_func)
ignore_path = os.getenv("VENV_PATH", None)
cb = SplitToSuitesCallback(filter_cb)
for d in args.dir:
print("Adding tests from directory tree %s" % d)
discover_tests(d, cb, ignore_path)
# suites are not hashable, need to use list
suites = []
tests_amount = 0
for testcase_suite in cb.suites.values():
tests_amount += testcase_suite.countTestCases()
suites.append(testcase_suite)
print("%s out of %s tests match specified filters" % (
tests_amount, tests_amount + cb.filtered.countTestCases()))
if not running_extended_tests:
print("Not running extended tests (some tests will be skipped)")
attempts = retries + 1
if attempts > 1:
print("Perform %s attempts to pass the suite..." % attempts)
if run_interactive and suites:
# don't fork if requiring interactive terminal
print('Running tests in foreground in the current process')
full_suite = unittest.TestSuite()
full_suite.addTests(suites)
result = VppTestRunner(verbosity=verbose,
failfast=failfast,
print_summary=True).run(full_suite)
was_successful = result.wasSuccessful()
if not was_successful:
for test_case_info in result.failed_test_cases_info:
handle_failed_suite(test_case_info.logger,
test_case_info.tempdir,
test_case_info.vpp_pid)
if test_case_info in result.core_crash_test_cases_info:
check_and_handle_core(test_case_info.vpp_bin_path,
test_case_info.tempdir,
test_case_info.core_crash_test)
sys.exit(not was_successful)
else:
print('Running each VPPTestCase in a separate background process'
' with {} parallel process(es)'.format(concurrent_tests))
exit_code = 0
while suites and attempts > 0:
results = run_forked(suites)
exit_code, suites = parse_results(results)
attempts -= 1
if exit_code == 0:
print('Test run was successful')
else:
print('%s attempt(s) left.' % attempts)
sys.exit(exit_code)
| 39.832599
| 79
| 0.584025
|
aca4b77137128f0ceef93bbff2a3f68b4ee40681
| 2,434
|
py
|
Python
|
dlfairness/original_code/DomainBiasMitigation/compute_bias_nway.py
|
lin-tan/fairness-variance
|
7f6aee23160707ffe78f429e5d960022ea1c9fe4
|
[
"BSD-3-Clause"
] | null | null | null |
dlfairness/original_code/DomainBiasMitigation/compute_bias_nway.py
|
lin-tan/fairness-variance
|
7f6aee23160707ffe78f429e5d960022ea1c9fe4
|
[
"BSD-3-Clause"
] | null | null | null |
dlfairness/original_code/DomainBiasMitigation/compute_bias_nway.py
|
lin-tan/fairness-variance
|
7f6aee23160707ffe78f429e5d960022ea1c9fe4
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import pickle
import numpy as np
import argparse
from pathlib import Path
GRAYSCALE = 0
COLOR = 1
parser = argparse.ArgumentParser()
parser.add_argument('--parent_folder', type=str)
parser.add_argument('--record_name', type=str)
parser.add_argument('--output_key', type=str, default='outputs')
args = parser.parse_args()
def compute_bias(score): # Score need to have shape [, 2]
global test_labels
domain_zeros = np.zeros([10000,], dtype=np.int32)
domain_ones = np.ones([10000,], dtype=np.int32)
domain_targets = np.concatenate([domain_zeros, domain_ones], axis=0)
class_targets = np.array(test_labels + test_labels)
target_domain_ratios = 0.5 * np.ones((10,))
domain_labels = ['Gray', 'Color']
class_count = score.shape[1]
test_set_size = class_targets.shape[0]
# cifar_inference()
#score = np.exp(score)
predicted_classes = np.argmax(score, axis=1)
# cifar_count_domain_incidence_from_gt()
count_per_class = np.zeros((class_count, 2), dtype=np.float64)
for i in range(test_set_size):
predicted_class = int(predicted_classes[i])
count_per_class[predicted_class][int(domain_targets[i])] += 1
bias = np.amax(count_per_class, axis=1) / np.sum(count_per_class, axis=1)
total_bias = np.abs(bias - 0.5)
mean_class_bias = np.mean(total_bias)
return mean_class_bias
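# Worked example (added for illustration, not part of the original file):
# if a class receives 70 grayscale-domain and 30 color-domain predictions,
# bias for that class is max(70, 30) / (70 + 30) = 0.7 and its contribution
# to mean_class_bias is |0.7 - 0.5| = 0.2; a perfectly balanced class
# contributes 0.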
def preprocess_score(gray_result, color_result): # np.exp applied to the score
score = np.concatenate([gray_result[args.output_key], color_result[args.output_key]], axis=0).astype(np.float128)
score = np.exp(score)
return score
color_result_path = Path(args.parent_folder, 'record', args.record_name, 'e1/test_color_result.pkl')
gray_result_path = Path(args.parent_folder, 'record', args.record_name, 'e1/test_gray_result.pkl')
test_label_path = Path(args.parent_folder, 'data/cifar_test_labels')
with open(color_result_path, 'rb') as f:
color_result = pickle.load(f)
with open(gray_result_path, 'rb') as f:
gray_result = pickle.load(f)
with open(test_label_path, 'rb') as f:
test_labels = pickle.load(f)
#print(color_result['outputs'].shape)
score = preprocess_score(gray_result, color_result)
mean_bias = compute_bias(score)
print(mean_bias)
output_file = Path(args.parent_folder, 'record', args.record_name, 'e1/bias_result.txt')
with open(output_file, 'w') as f:
f.write("Mean bias: {:4f}".format(mean_bias))
| 34.771429
| 117
| 0.724322
|
b98118fa6b1b7dae4406221ab9330bf37cefab57
| 57,250
|
py
|
Python
|
infoblox_netmri/api/broker/v2_9_0/discovery_hint_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 12
|
2016-02-19T12:37:54.000Z
|
2022-03-04T20:11:08.000Z
|
infoblox_netmri/api/broker/v2_9_0/discovery_hint_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 18
|
2015-11-12T18:37:00.000Z
|
2021-05-19T07:59:55.000Z
|
infoblox_netmri/api/broker/v2_9_0/discovery_hint_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 18
|
2016-01-07T12:04:34.000Z
|
2022-03-31T11:05:41.000Z
|
from ..broker import Broker
class DiscoveryHintBroker(Broker):
controller = "discovery_hints"
def index(self, **kwargs):
"""Lists the available discovery hints. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Array of Integer
| ``api version min:`` 2
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint.
:type UnitID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint.
:type UnitID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoveryHint. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hints: An array of the DiscoveryHint objects that match the specified input criteria.
:rtype discovery_hints: Array of DiscoveryHint
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
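# Usage sketch (added for illustration; assumes `broker` is an instance of
# this class obtained from an authenticated NetMRI API client, which is set
# up outside this file):
#
#     hints = broker.index(limit=10, sort=['id'], select=['id', 'hint'])
#
# All keyword arguments are passed straight through to the API request.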
def search(self, **kwargs):
"""Lists the available discovery hints matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.1
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint.
:type UnitID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint.
:type UnitID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoveryHint. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against discovery hints, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: hint, device_type.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Keep in mind that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hints: An array of the DiscoveryHint objects that match the specified input criteria.
:rtype discovery_hints: Array of DiscoveryHint
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
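# Usage sketch (illustrative, using the same hypothetical `broker` instance
# as above):
#
#     hints = broker.search(query='10.0.0.', limit=100)
#
# The free-text `query` is matched against the hint and device_type
# attributes, as described in the docstring above.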
def find(self, **kwargs):
"""Lists the available discovery hints matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: UnitID, cli_enable_password_secure, cli_user_name_secure_ssh, cli_user_name_secure_telnet, cli_user_password_secure_ssh, cli_user_password_secure_telnet, created_at, created_by, device_type, hint, id, secure_version, snmp_auth_password_secure, snmp_auth_protocol, snmp_community_secure, snmp_private_password_secure, snmp_private_protocol, snmp_protocol, updated_at, updated_by.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_UnitID: The operator to apply to the field UnitID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_UnitID: If op_UnitID is specified, the field named in this input will be compared to the value in UnitID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UnitID must be specified if op_UnitID is specified.
:type val_f_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_UnitID: If op_UnitID is specified, this value will be compared to the value in UnitID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UnitID must be specified if op_UnitID is specified.
:type val_c_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_enable_password_secure: The operator to apply to the field cli_enable_password_secure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_enable_password_secure: Device specific CLI enable password for all protocols. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_enable_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_enable_password_secure: If op_cli_enable_password_secure is specified, the field named in this input will be compared to the value in cli_enable_password_secure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_enable_password_secure must be specified if op_cli_enable_password_secure is specified.
:type val_f_cli_enable_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_enable_password_secure: If op_cli_enable_password_secure is specified, this value will be compared to the value in cli_enable_password_secure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_enable_password_secure must be specified if op_cli_enable_password_secure is specified.
:type val_c_cli_enable_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_user_name_secure_ssh: The operator to apply to the field cli_user_name_secure_ssh. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_user_name_secure_ssh: Device specific CLI username for ssh protocol. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_user_name_secure_ssh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_user_name_secure_ssh: If op_cli_user_name_secure_ssh is specified, the field named in this input will be compared to the value in cli_user_name_secure_ssh using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_user_name_secure_ssh must be specified if op_cli_user_name_secure_ssh is specified.
:type val_f_cli_user_name_secure_ssh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_user_name_secure_ssh: If op_cli_user_name_secure_ssh is specified, this value will be compared to the value in cli_user_name_secure_ssh using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_user_name_secure_ssh must be specified if op_cli_user_name_secure_ssh is specified.
:type val_c_cli_user_name_secure_ssh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_user_name_secure_telnet: The operator to apply to the field cli_user_name_secure_telnet. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_user_name_secure_telnet: Device specific CLI username for telnet protocol. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_user_name_secure_telnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_user_name_secure_telnet: If op_cli_user_name_secure_telnet is specified, the field named in this input will be compared to the value in cli_user_name_secure_telnet using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_user_name_secure_telnet must be specified if op_cli_user_name_secure_telnet is specified.
:type val_f_cli_user_name_secure_telnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_user_name_secure_telnet: If op_cli_user_name_secure_telnet is specified, this value will be compared to the value in cli_user_name_secure_telnet using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_user_name_secure_telnet must be specified if op_cli_user_name_secure_telnet is specified.
:type val_c_cli_user_name_secure_telnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_user_password_secure_ssh: The operator to apply to the field cli_user_password_secure_ssh. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_user_password_secure_ssh: Device specific CLI password for ssh protocol. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_user_password_secure_ssh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_user_password_secure_ssh: If op_cli_user_password_secure_ssh is specified, the field named in this input will be compared to the value in cli_user_password_secure_ssh using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_user_password_secure_ssh must be specified if op_cli_user_password_secure_ssh is specified.
:type val_f_cli_user_password_secure_ssh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_user_password_secure_ssh: If op_cli_user_password_secure_ssh is specified, this value will be compared to the value in cli_user_password_secure_ssh using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_user_password_secure_ssh must be specified if op_cli_user_password_secure_ssh is specified.
:type val_c_cli_user_password_secure_ssh: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_user_password_secure_telnet: The operator to apply to the field cli_user_password_secure_telnet. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_user_password_secure_telnet: Device specific CLI password for telnet protocol. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_user_password_secure_telnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_user_password_secure_telnet: If op_cli_user_password_secure_telnet is specified, the field named in this input will be compared to the value in cli_user_password_secure_telnet using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_user_password_secure_telnet must be specified if op_cli_user_password_secure_telnet is specified.
:type val_f_cli_user_password_secure_telnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_user_password_secure_telnet: If op_cli_user_password_secure_telnet is specified, this value will be compared to the value in cli_user_password_secure_telnet using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_user_password_secure_telnet must be specified if op_cli_user_password_secure_telnet is specified.
:type val_c_cli_user_password_secure_telnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the hint was created. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_by: The operator to apply to the field created_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_by: The user that initially created the discovery hint. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_by: If op_created_by is specified, the field named in this input will be compared to the value in created_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_by must be specified if op_created_by is specified.
:type val_f_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_by: If op_created_by is specified, this value will be compared to the value in created_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_by must be specified if op_created_by is specified.
:type val_c_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_type: The operator to apply to the field device_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_type: The device type applied to the given discovery hint. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_type: If op_device_type is specified, the field named in this input will be compared to the value in device_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_type must be specified if op_device_type is specified.
:type val_f_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_type: If op_device_type is specified, this value will be compared to the value in device_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_type must be specified if op_device_type is specified.
:type val_c_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_hint: The operator to apply to the field hint. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. hint: The hint used by the discovery engine. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_hint: If op_hint is specified, the field named in this input will be compared to the value in hint using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_hint must be specified if op_hint is specified.
:type val_f_hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_hint: If op_hint is specified, this value will be compared to the value in hint using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_hint must be specified if op_hint is specified.
:type val_c_hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for the discovery hint. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_secure_version: The operator to apply to the field secure_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. secure_version: The encryption version of the username and passwords. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_secure_version: If op_secure_version is specified, the field named in this input will be compared to the value in secure_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_secure_version must be specified if op_secure_version is specified.
:type val_f_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_secure_version: If op_secure_version is specified, this value will be compared to the value in secure_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_secure_version must be specified if op_secure_version is specified.
:type val_c_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_auth_password_secure: The operator to apply to the field snmp_auth_password_secure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_auth_password_secure: The SNMPv3 authentication protocol password. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_auth_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_auth_password_secure: If op_snmp_auth_password_secure is specified, the field named in this input will be compared to the value in snmp_auth_password_secure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_auth_password_secure must be specified if op_snmp_auth_password_secure is specified.
:type val_f_snmp_auth_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_auth_password_secure: If op_snmp_auth_password_secure is specified, this value will be compared to the value in snmp_auth_password_secure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_auth_password_secure must be specified if op_snmp_auth_password_secure is specified.
:type val_c_snmp_auth_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_auth_protocol: The operator to apply to the field snmp_auth_protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_auth_protocol: The SNMPv3 authentication protocol to use with this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_auth_protocol: If op_snmp_auth_protocol is specified, the field named in this input will be compared to the value in snmp_auth_protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_auth_protocol must be specified if op_snmp_auth_protocol is specified.
:type val_f_snmp_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_auth_protocol: If op_snmp_auth_protocol is specified, this value will be compared to the value in snmp_auth_protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_auth_protocol must be specified if op_snmp_auth_protocol is specified.
:type val_c_snmp_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_community_secure: The operator to apply to the field snmp_community_secure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_community_secure: The SNMP community string. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_community_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_community_secure: If op_snmp_community_secure is specified, the field named in this input will be compared to the value in snmp_community_secure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_community_secure must be specified if op_snmp_community_secure is specified.
:type val_f_snmp_community_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_community_secure: If op_snmp_community_secure is specified, this value will be compared to the value in snmp_community_secure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_community_secure must be specified if op_snmp_community_secure is specified.
:type val_c_snmp_community_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_private_password_secure: The operator to apply to the field snmp_private_password_secure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_private_password_secure: The SNMPv3 privacy protocol password. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_private_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_private_password_secure: If op_snmp_private_password_secure is specified, the field named in this input will be compared to the value in snmp_private_password_secure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_private_password_secure must be specified if op_snmp_private_password_secure is specified.
:type val_f_snmp_private_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_private_password_secure: If op_snmp_private_password_secure is specified, this value will be compared to the value in snmp_private_password_secure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_private_password_secure must be specified if op_snmp_private_password_secure is specified.
:type val_c_snmp_private_password_secure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_private_protocol: The operator to apply to the field snmp_private_protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_private_protocol: The SNMPv3 privacy protocol to use with this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_private_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_private_protocol: If op_snmp_private_protocol is specified, the field named in this input will be compared to the value in snmp_private_protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_private_protocol must be specified if op_snmp_private_protocol is specified.
:type val_f_snmp_private_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_private_protocol: If op_snmp_private_protocol is specified, this value will be compared to the value in snmp_private_protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_private_protocol must be specified if op_snmp_private_protocol is specified.
:type val_c_snmp_private_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_protocol: The operator to apply to the field snmp_protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_protocol: The SNMP protocol for which to use these credentials. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_protocol: If op_snmp_protocol is specified, the field named in this input will be compared to the value in snmp_protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_protocol must be specified if op_snmp_protocol is specified.
:type val_f_snmp_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_protocol: If op_snmp_protocol is specified, this value will be compared to the value in snmp_protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_protocol must be specified if op_snmp_protocol is specified.
:type val_c_snmp_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the hint was last modified. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_by: The operator to apply to the field updated_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_by: The user that last updated the discovery hint. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_by: If op_updated_by is specified, the field named in this input will be compared to the value in updated_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_by must be specified if op_updated_by is specified.
:type val_f_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_by: If op_updated_by is specified, this value will be compared to the value in updated_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_by must be specified if op_updated_by is specified.
:type val_c_updated_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DiscoveryHint. Valid values are id, hint, device_type, UnitID, created_by, updated_by, created_at, updated_at, cli_user_name_secure_ssh, cli_user_password_secure_ssh, snmp_protocol, snmp_community_secure, snmp_auth_protocol, snmp_auth_password_secure, snmp_private_protocol, snmp_private_password_secure, secure_version, cli_user_name_secure_telnet, cli_user_password_secure_telnet, cli_enable_password_secure. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Keep in mind that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hints: An array of the DiscoveryHint objects that match the specified input criteria.
:rtype discovery_hints: Array of DiscoveryHint
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
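# Usage sketch (illustrative, same hypothetical `broker` instance as above).
# Each filtered field takes an op_<field> operator together with either a
# constant value (val_c_<field>) or another field name (val_f_<field>):
#
#     hints = broker.find(op_device_type='=', val_c_device_type='Router',
#                         op_updated_at='>', val_c_updated_at='2020-01-01')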
def show(self, **kwargs):
"""Shows the details for the specified discovery hint.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hint: The discovery hint identified by the specified id.
:rtype discovery_hint: DiscoveryHint
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def create(self, **kwargs):
"""Creates a new discovery hint.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param hint: The hint used by the discovery engine.
:type hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_type: The device type applied to the given discovery hint.
:type device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint.
:type UnitID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created discovery hint.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created discovery hint.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the newly created discovery hint.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hint: The newly created discovery hint.
:rtype discovery_hint: DiscoveryHint
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
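# Usage sketch (illustrative, hypothetical values and `broker` instance):
#
#     result = broker.create(hint='10.1.0.0/24', device_type='Router')
#
# The returned structure carries the new hint's id, model, uri and the
# created DiscoveryHint, as listed in the Outputs section above.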
def update(self, **kwargs):
"""Updates an existing discovery hint.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param hint: The hint used by the discovery engine. If omitted, this field will not be updated.
:type hint: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_type: The device type applied to the given discovery hint. If omitted, this field will not be updated.
:type device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param UnitID: The internal NetMRI identifier of the collector assigned to the discovery hint. If omitted, this field will be updated to the default value.
:type UnitID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated discovery hint.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated discovery hint.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated discovery hint.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return discovery_hint: The updated discovery hint.
:rtype discovery_hint: DiscoveryHint
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified discovery hint from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the discovery hint.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
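# Usage sketch for the remaining calls (illustrative, hypothetical id and
# `broker` instance):
#
#     broker.update(id=42, device_type='Switch')  # change selected fields
#     broker.destroy(id=42)                       # delete the hint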
| 58.959835
| 791
| 0.638777
|
d21e42616236376190f697d9debf9074d5c773d7
| 877
|
py
|
Python
|
profiles_project/urls.py
|
JBG0000/profiles-rest-api
|
f3d4c6a4b2df0da5401ec48d8c068dac08f057cb
|
[
"MIT"
] | null | null | null |
profiles_project/urls.py
|
JBG0000/profiles-rest-api
|
f3d4c6a4b2df0da5401ec48d8c068dac08f057cb
|
[
"MIT"
] | null | null | null |
profiles_project/urls.py
|
JBG0000/profiles-rest-api
|
f3d4c6a4b2df0da5401ec48d8c068dac08f057cb
|
[
"MIT"
] | null | null | null |
"""profiles_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include  # include: used to pull in URL patterns from another app
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('profiles_api.urls')),  # requests prefixed with api/ are routed to profiles_api's urls.py
]
| 38.130435
| 77
| 0.708096
|
b2ea6d0779757f9f2a94fbcf4fb048d321404281
| 442
|
py
|
Python
|
assignments/python3/hello/hello.py
|
jeremybergen/csci000-astudent
|
ab14521a476103ce60f5d8ef61095a5c712a64d4
|
[
"MIT"
] | 2
|
2020-08-19T15:35:20.000Z
|
2021-08-31T19:35:43.000Z
|
assignments/python3/hello/hello.py
|
jeremybergen/csci000-astudent
|
ab14521a476103ce60f5d8ef61095a5c712a64d4
|
[
"MIT"
] | null | null | null |
assignments/python3/hello/hello.py
|
jeremybergen/csci000-astudent
|
ab14521a476103ce60f5d8ef61095a5c712a64d4
|
[
"MIT"
] | 18
|
2020-08-17T03:54:40.000Z
|
2021-10-12T08:06:23.000Z
|
#!/usr/bin/env python3
# Kattis - hello problem
import sys
def answer():
return "Hello World!"
# not used!
def greet(name):
ans = f'Hello {name}!'
return ans
def solve():
print(answer())
def test():
assert answer() == "Hello World!"
print('all test cases passed...')
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'test':
#print(sys.argv)
test()
else:
solve()
| 17
| 51
| 0.567873
|
218ed1ea2e511e45b1aa8df8ccb8fa7b553ec6df
| 6,272
|
py
|
Python
|
xarray/tests/test_plugins.py
|
jhamman/xarray-test-docs
|
c54123772817875678ec7ad769e6d4d6612aeb92
|
[
"Apache-2.0"
] | 1
|
2022-02-18T10:14:38.000Z
|
2022-02-18T10:14:38.000Z
|
xarray/tests/test_plugins.py
|
jhamman/xarray-test-docs
|
c54123772817875678ec7ad769e6d4d6612aeb92
|
[
"Apache-2.0"
] | 3
|
2022-03-22T20:52:33.000Z
|
2022-03-22T20:52:36.000Z
|
xarray/tests/test_plugins.py
|
jhamman/xarray-test-docs
|
c54123772817875678ec7ad769e6d4d6612aeb92
|
[
"Apache-2.0"
] | null | null | null |
from importlib.metadata import EntryPoint
from unittest import mock
import pytest
from xarray.backends import common, plugins
importlib_metadata_mock = "importlib.metadata"
class DummyBackendEntrypointArgs(common.BackendEntrypoint):
def open_dataset(filename_or_obj, *args):
pass
class DummyBackendEntrypointKwargs(common.BackendEntrypoint):
def open_dataset(filename_or_obj, **kwargs):
pass
class DummyBackendEntrypoint1(common.BackendEntrypoint):
def open_dataset(self, filename_or_obj, *, decoder):
pass
class DummyBackendEntrypoint2(common.BackendEntrypoint):
def open_dataset(self, filename_or_obj, *, decoder):
pass
@pytest.fixture
def dummy_duplicated_entrypoints():
specs = [
["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
["engine1", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
["engine2", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
]
eps = [EntryPoint(name, value, group) for name, value, group in specs]
return eps
@pytest.mark.filterwarnings("ignore:Found")
def test_remove_duplicates(dummy_duplicated_entrypoints) -> None:
with pytest.warns(RuntimeWarning):
entrypoints = plugins.remove_duplicates(dummy_duplicated_entrypoints)
assert len(entrypoints) == 2
def test_broken_plugin() -> None:
broken_backend = EntryPoint(
"broken_backend",
"xarray.tests.test_plugins:backend_1",
"xarray.backends",
)
with pytest.warns(RuntimeWarning) as record:
_ = plugins.build_engines([broken_backend])
assert len(record) == 1
message = str(record[0].message)
assert "Engine 'broken_backend'" in message
def test_remove_duplicates_warnings(dummy_duplicated_entrypoints) -> None:
with pytest.warns(RuntimeWarning) as record:
_ = plugins.remove_duplicates(dummy_duplicated_entrypoints)
assert len(record) == 2
message0 = str(record[0].message)
message1 = str(record[1].message)
assert "entrypoints" in message0
assert "entrypoints" in message1
@mock.patch(
f"{importlib_metadata_mock}.EntryPoint.load", mock.MagicMock(return_value=None)
)
def test_backends_dict_from_pkg() -> None:
specs = [
["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
]
entrypoints = [EntryPoint(name, value, group) for name, value, group in specs]
engines = plugins.backends_dict_from_pkg(entrypoints)
assert len(engines) == 2
assert engines.keys() == {"engine1", "engine2"}
def test_set_missing_parameters() -> None:
backend_1 = DummyBackendEntrypoint1
backend_2 = DummyBackendEntrypoint2
backend_2.open_dataset_parameters = ("filename_or_obj",)
engines = {"engine_1": backend_1, "engine_2": backend_2}
plugins.set_missing_parameters(engines)
assert len(engines) == 2
assert backend_1.open_dataset_parameters == ("filename_or_obj", "decoder")
assert backend_2.open_dataset_parameters == ("filename_or_obj",)
backend = DummyBackendEntrypointKwargs()
backend.open_dataset_parameters = ("filename_or_obj", "decoder")
plugins.set_missing_parameters({"engine": backend})
assert backend.open_dataset_parameters == ("filename_or_obj", "decoder")
backend_args = DummyBackendEntrypointArgs()
backend_args.open_dataset_parameters = ("filename_or_obj", "decoder")
plugins.set_missing_parameters({"engine": backend_args})
assert backend_args.open_dataset_parameters == ("filename_or_obj", "decoder")
def test_set_missing_parameters_raise_error() -> None:
backend = DummyBackendEntrypointKwargs()
with pytest.raises(TypeError):
plugins.set_missing_parameters({"engine": backend})
backend_args = DummyBackendEntrypointArgs()
with pytest.raises(TypeError):
plugins.set_missing_parameters({"engine": backend_args})
@mock.patch(
f"{importlib_metadata_mock}.EntryPoint.load",
mock.MagicMock(return_value=DummyBackendEntrypoint1),
)
def test_build_engines() -> None:
dummy_pkg_entrypoint = EntryPoint(
"cfgrib", "xarray.tests.test_plugins:backend_1", "xarray_backends"
)
backend_entrypoints = plugins.build_engines([dummy_pkg_entrypoint])
assert isinstance(backend_entrypoints["cfgrib"], DummyBackendEntrypoint1)
assert backend_entrypoints["cfgrib"].open_dataset_parameters == (
"filename_or_obj",
"decoder",
)
@mock.patch(
f"{importlib_metadata_mock}.EntryPoint.load",
mock.MagicMock(return_value=DummyBackendEntrypoint1),
)
def test_build_engines_sorted() -> None:
dummy_pkg_entrypoints = [
EntryPoint("dummy2", "xarray.tests.test_plugins:backend_1", "xarray.backends"),
EntryPoint("dummy1", "xarray.tests.test_plugins:backend_1", "xarray.backends"),
]
backend_entrypoints = plugins.build_engines(dummy_pkg_entrypoints)
backend_entrypoints = list(backend_entrypoints)
indices = []
for be in plugins.STANDARD_BACKENDS_ORDER:
try:
index = backend_entrypoints.index(be)
backend_entrypoints.pop(index)
indices.append(index)
except ValueError:
pass
assert set(indices) < {0, -1}
assert list(backend_entrypoints) == sorted(backend_entrypoints)
@mock.patch(
"xarray.backends.plugins.list_engines",
mock.MagicMock(return_value={"dummy": DummyBackendEntrypointArgs()}),
)
def test_no_matching_engine_found() -> None:
with pytest.raises(ValueError, match=r"did not find a match in any"):
plugins.guess_engine("not-valid")
with pytest.raises(ValueError, match=r"found the following matches with the input"):
plugins.guess_engine("foo.nc")
@mock.patch(
"xarray.backends.plugins.list_engines",
mock.MagicMock(return_value={}),
)
def test_engines_not_installed() -> None:
with pytest.raises(ValueError, match=r"xarray is unable to open"):
plugins.guess_engine("not-valid")
with pytest.raises(ValueError, match=r"found the following matches with the input"):
plugins.guess_engine("foo.nc")
| 33.72043
| 88
| 0.722417
|
a2d9a9c90c3346a98f14ce1bb809553940ade3ed
| 10,976
|
py
|
Python
|
plotly/graph_objs/box/hoverlabel/__init__.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | 6
|
2019-05-03T02:12:04.000Z
|
2020-03-01T06:33:21.000Z
|
plotly/graph_objs/box/hoverlabel/__init__.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | null | null | null |
plotly/graph_objs/box/hoverlabel/__init__.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | 5
|
2019-05-18T16:50:11.000Z
|
2021-07-06T21:14:36.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'box.hoverlabel'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.box.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__('font')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.box.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.box.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.box.hoverlabel import (font as v_font)
# Initialize validators
# ---------------------
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 34.086957
| 82
| 0.562773
|
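A short usage sketch of the generated class above (the import path is inferred from this file's location in the plotly package; the property values are arbitrary):

# Construct the hover-label Font defined above and inspect its JSON form.
from plotly.graph_objs.box.hoverlabel import Font

font = Font(color='#ff0000', family='Arial', size=12)
print(font.to_plotly_json())  # roughly {'color': '#ff0000', 'family': 'Arial', 'size': 12}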
92bbb45eafbb483591507719cfc0cddc6c802e3a
| 317
|
py
|
Python
|
examples/tutorials/django/blog/urls.py
|
psy-repos-rust/vagga
|
07d8b32c0656f17427d8b4399f3f36dfbf21ab88
|
[
"MIT"
] | 1,974
|
2015-01-05T01:45:10.000Z
|
2022-03-28T14:35:52.000Z
|
examples/tutorials/django/blog/urls.py
|
psy-repos-rust/vagga
|
07d8b32c0656f17427d8b4399f3f36dfbf21ab88
|
[
"MIT"
] | 536
|
2015-01-06T20:33:40.000Z
|
2022-03-03T16:22:21.000Z
|
examples/tutorials/django/blog/urls.py
|
psy-repos-rust/vagga
|
07d8b32c0656f17427d8b4399f3f36dfbf21ab88
|
[
"MIT"
] | 132
|
2015-02-10T11:03:30.000Z
|
2022-01-28T12:59:44.000Z
|
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from . import views
cache_15m = cache_page(60 * 15)
urlpatterns = [
url(r'^$', views.ArticleList.as_view(), name='article_list'),
url(r'^(?P<pk>\d+?)$', cache_15m(views.ArticleDetail.as_view()), name='article_detail'),
]
| 28.818182
| 92
| 0.709779
|
be2b32d00fc48de3c8f3e57f5c1086d089a53b9c
| 6,096
|
py
|
Python
|
bv2cid.py
|
fossabot/Bilibili_BV_Converter
|
48aabddeb6dd66838db2d2a765cea8de497e9fd1
|
[
"MIT"
] | 4
|
2020-10-15T01:16:59.000Z
|
2021-02-26T02:47:16.000Z
|
bv2cid.py
|
fossabot/Bilibili_BV_Converter
|
48aabddeb6dd66838db2d2a765cea8de497e9fd1
|
[
"MIT"
] | 3
|
2021-02-23T03:49:31.000Z
|
2022-01-06T10:08:10.000Z
|
bv2cid.py
|
fossabot/Bilibili_BV_Converter
|
48aabddeb6dd66838db2d2a765cea8de497e9fd1
|
[
"MIT"
] | 6
|
2020-11-02T23:20:01.000Z
|
2022-01-18T15:16:25.000Z
|
import requests
import sys
import locale
headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'DNT': '1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/82.0.4083.0 Safari/537.36 Edg/82.0.458.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Accept-Language': 'en-US,en;q=0.9'
}
chs = [
'请输入BV号:',
'输入错误!BV号应由\'BV\'开头!',
'查询出错了!\n可能的原因:\n\t该视频不存在所指定的p!',
'查询出错了!\n可能的原因:\n\t1、你调用的次数太多暂时冻结了,要过一会儿才可以继续查询!\n\t2、你的网络可能出现了一些异常\n\t3、不存在这一个视频',
'抱歉,请重试!',
'对应的CID号为:',
'请输入你需要查询的对应视频的p数(仅限数字)\n(0代表视频第一p,1代表视频第二p,以此类推)\n',
'输入错误!p数需为自然数!'
]
eng = [
'Please input BV number:',
'Input Error! BV number should start with \'BV\'!',
'Query Error!\nPossible Reason:\n\tThis video might not have an AV number',
'Query Error!\nPossible Reasons:\n\t1. You use this too much in short period, please query later!' +
'\n\t2.You might encountered with some internet issues\n\t3.The video does not exist',
'Sorry, please try again!',
'The CID number for the video you input is: ',
'Please input the index of the part of the video you want to\n(Number only, 0 stands for the first part, ' +
'1 stands for the second part, and so on):\n',
'Input error! The part number should be a whole number!'
]
# Function when it is short link
def get_url(link):
response = requests.get(link, headers=headers)
return response.url
language = locale.getdefaultlocale()[0]
try:
    temp = sys.argv[1]  # check whether a BV number was given on the command line
    temp = sys.argv[2]  # the corresponding part (p) index
except IndexError:
if language == 'zh_CN':
print('你当前的系统语言是简体中文,需要更改程序显示语言吗?')
print('Your current language display is Chinese Simplified, do you want to switch language?')
lang = chs
else:
print('你当前的系统语言不是简体中文,需要更改程序显示语言吗?')
print('Your current language display is English, do you want to switch language?')
lang = eng
while True:
switch_confirmation = input('(Y/N)')
if switch_confirmation.lower() == 'y' or switch_confirmation.lower() == 'yes' \
or switch_confirmation.lower() == 't' or switch_confirmation.lower() == 'true'\
or switch_confirmation == '是' or switch_confirmation == '确定':
switch = True
break
elif switch_confirmation.lower() == 'n' or switch_confirmation.lower() == 'no' \
or switch_confirmation.lower() == 'f' or switch_confirmation.lower() == 'false'\
or switch_confirmation == '否':
switch = False
break
else:
print('输入错误!')
print('Unknown Selection, please try again!')
if switch:
while True:
print('1. 简体中文\n2. English')
language_selection = input('请选择语言(Select language):')
if language_selection == '1' or language_selection.lower() == 'chs' or language_selection == '简体中文' \
or language_selection == '中文' or language_selection.lower() == 'cn' \
or language_selection.lower() == 'china' or language_selection.lower() == 'chinese':
lang = chs
break
elif language_selection == '2' or language_selection.lower() == 'eng' \
or language_selection.lower() == 'english':
lang = eng
break
else:
print('输入错误,请重试\n(Unknown Selection, please try again)')
else:
if language == 'zh_CN':
lang = chs
else:
lang = eng
try:
sys.argv[1]
except IndexError:
BV_Number = input(lang[0])
while True:
if BV_Number.lower().find('bilibili.com') > 0 \
or BV_Number.lower().find('b23.tv') > 0:
if get_url(BV_Number).upper().find('BV') < len(get_url(BV_Number)) - 2:
BV_Number = get_url(BV_Number)
break
else:
print(lang[1])
BV_Number = input(lang[0])
else:
if BV_Number.upper().find('BV') == 0:
break
else:
print(lang[1])
BV_Number = input(lang[0])
else:
BV_Number = sys.argv[1]
while True:
if BV_Number.lower().find('bilibili.com') > 0 \
or BV_Number.lower().find('b23.tv') > 0:
if get_url(BV_Number).upper().find('BV') < len(get_url(BV_Number)) - 2:
BV_Number = get_url(BV_Number)
break
else:
print(lang[1])
BV_Number = input(lang[0])
else:
if BV_Number.upper().find('BV') == 0:
break
else:
print(lang[1])
BV_Number = input(lang[0])
if BV_Number.find('?') != -1:
BV_Number = "BV" + BV_Number[BV_Number.upper().find('BV')+2:BV_Number.find('?')]
else:
BV_Number = "BV" + BV_Number[BV_Number.upper().find('BV')+2:]
try:
sys.argv[2]
except IndexError:
while True:
try:
p_number = int(input(lang[6]))
        except ValueError:
print(lang[7])
else:
if p_number < 0:
print(lang[7])
else:
break
else:
try:
p_number = int(sys.argv[2])
    except ValueError:
print(lang[7])
sys.exit(0)
else:
if p_number < 0:
print(lang[7])
sys.exit(0)
url = 'https://api.bilibili.com/x/player/pagelist?bvid={}&jsonp=jsonp'
r = requests.get(url.format(BV_Number), headers=headers)
if r.status_code == 200:
try:
j_cid = r.json()['data'][p_number]['cid']
print(lang[5] + str(j_cid))
except TypeError:
print(lang[2])
except IndexError:
print(lang[2])
else:
print(lang[3])
| 33.866667
| 155
| 0.56332
|
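The script above interleaves language selection, argument handling and the lookup itself. Reduced to just the lookup step, and reusing the same public pagelist endpoint it already calls (no retries or error handling; purely a sketch):

# Minimal sketch: resolve the CID of one part of a Bilibili video from its BV id.
import requests

def bv_to_cid(bv_number: str, part: int = 0) -> int:
    url = 'https://api.bilibili.com/x/player/pagelist?bvid={}&jsonp=jsonp'
    r = requests.get(url.format(bv_number), timeout=10)
    r.raise_for_status()
    return r.json()['data'][part]['cid']

print(bv_to_cid('BV1xx411c7mD'))  # hypothetical BV id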
75254bec863a910507f0e5f9e1d0b3fd7fbbdb7c
| 2,963
|
py
|
Python
|
src/hypothesis_faker/utilities.py
|
dycw/hypothesis-faker
|
dc63870c0834531cc8029d383e0d44dca0257090
|
[
"MIT"
] | 1
|
2021-04-30T18:09:41.000Z
|
2021-04-30T18:09:41.000Z
|
src/hypothesis_faker/utilities.py
|
dycw/hypothesis-faker
|
dc63870c0834531cc8029d383e0d44dca0257090
|
[
"MIT"
] | null | null | null |
src/hypothesis_faker/utilities.py
|
dycw/hypothesis-faker
|
dc63870c0834531cc8029d383e0d44dca0257090
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import re
from bisect import bisect
from functools import reduce
from re import sub
from string import ascii_lowercase
from typing import Callable
from typing import Iterable
from typing import cast
from hypothesis.strategies import SearchStrategy
from hypothesis.strategies import composite
from hypothesis.strategies import floats
from hypothesis.strategies import integers
from hypothesis.strategies import just
from hypothesis.strategies import sampled_from
from hypothesis_faker.types import Num
from hypothesis_faker.types import T
ascii_lowercase_letters = sampled_from(ascii_lowercase)
digits_0_9 = integers(0, 9).map(str)
digits_1_9 = integers(1, 9).map(str)
empty_str = just("")
def fill_format_string(format_: str, replacements: dict[str, str]) -> str:
return reduce(_fill_format_1, replacements.items(), format_)
def _fill_format_1(format_: str, pair: tuple[str, str]) -> str:
token, replacement = pair
return sub(f"{{{{{token}}}}}", replacement, format_)
def numerified(format_: str) -> SearchStrategy[str]:
@composite
def inner(draw: Callable[[SearchStrategy[T]], T]) -> str:
chars = []
for char in format_:
if char == "#":
chars.append(draw(digits_0_9))
elif char == "%":
chars.append(draw(digits_1_9))
elif char == "!":
chars.append(draw(digits_0_9 | empty_str))
elif char == "%":
chars.append(draw(digits_1_9 | empty_str))
else:
chars.append(char)
return "".join(chars)
return inner()
PATTERN_FOR_DOUBLE_BRACES = re.compile(r"{{(\w+)}}")
class WeightedList(list[tuple[T, Num]]):
def __init__(self, items: Iterable[tuple[T, Num]]) -> None:
super().__init__(items)
if not self:
raise ValueError(f"{self} cannot be empty")
try:
elements, weights = zip(*self)
except TypeError:
raise TypeError(f"{self} could not be zipped into 2 lists")
self._elements = cast(list[T], list(elements))
self._weights = cast(list[Num], list(weights))
self.total_weight, self._cum_weights = 0.0, []
for weight in self._weights:
if weight < 0.0:
raise ValueError(f"Invalid {weight=}")
self.total_weight += weight
self._cum_weights.append(self.total_weight)
def __add__(self, other: WeightedList[T]) -> WeightedList[T]:
return WeightedList(super().__add__(other))
def __getitem__(self, item: Num) -> T:
if not 0.0 <= item < self.total_weight:
raise IndexError(f"Invalid {item=}")
return self._elements[bisect(self._cum_weights, item)]
def weighted_samples(wlist: WeightedList[T]) -> SearchStrategy[T]:
def inner(i: float) -> T:
return wlist[i]
return floats(0.0, wlist.total_weight, exclude_max=True).map(inner)
| 31.860215
| 74
| 0.652717
|
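A brief usage sketch of the helpers above (the strategies and classes come from this module; the concrete format string, weights and test body are invented for illustration):

# Drawing values from the strategies defined above with hypothesis.
from hypothesis import given

phone = numerified("%##-###-####")          # '%' -> non-zero digit, '#' -> any digit
colors = weighted_samples(WeightedList([("red", 2.0), ("blue", 1.0)]))

@given(phone, colors)
def test_draws(p, c):
    assert len(p) == 12
    assert c in {"red", "blue"}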
f026a34bb023915faa40ec6216dec07ae6a831e2
| 701
|
py
|
Python
|
001383WiseplatMarathPyBegin/day5_my1_IfElse.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001383WiseplatMarathPyBegin/day5_my1_IfElse.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001383WiseplatMarathPyBegin/day5_my1_IfElse.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
f = '10'
if f == str(10):
print("f==10")
h = 10
if h == int('10'):
print("h=='10'")
j = "2.53"
print(float(j) + 7.5)
age = int(input("Введите Ваш возраст: "))
if age == 5:
print("5!")
elif age == 6:
print("6!")
elif age == 7:
print("7!")
if age < 100:
print("age<100")
else:
print("Вам больше или равно 100")
if age > 20:
print("О, вы так стары?!")
print("Вам больше, чем 20?")
if age < 10:
print("О, Вам так мало лет?")
if age == 18:
print("Ура, мне 18, еду, куда хочу!")
if age >= 30 and age <= 40:
print("Ооо, от 30 до 40!")
if age == 50 or age == 60 or age == 70 or age == 80:
print("О, у Вас юбилей")
print("Мы проверили Ваш возраст")
| 17.097561
| 52
| 0.529244
|
5ccfe57a7b36f5c92c3a6ab774521b4c16fe408f
| 865
|
py
|
Python
|
3_sentence_corpus_cleaning.py
|
yerlandinata/building-semantic-corpus-crowdsourcing-and-machine-learning
|
e50519b80c30318e259e544fa1813fd003dd94c3
|
[
"MIT"
] | 1
|
2020-04-16T02:35:11.000Z
|
2020-04-16T02:35:11.000Z
|
3_sentence_corpus_cleaning.py
|
yerlandinata/building-semantic-corpus-crowdsourcing-and-machine-learning
|
e50519b80c30318e259e544fa1813fd003dd94c3
|
[
"MIT"
] | null | null | null |
3_sentence_corpus_cleaning.py
|
yerlandinata/building-semantic-corpus-crowdsourcing-and-machine-learning
|
e50519b80c30318e259e544fa1813fd003dd94c3
|
[
"MIT"
] | null | null | null |
import datetime
from nltk.tokenize import word_tokenize
from codes.cleaner import is_valid_word
from codes.utils import pipe
src = open(input('input file: '), 'r')
dest = open(input('output file: '), 'w')
start_time = datetime.datetime.now()
i = 0
while True:
line = src.readline()
if line == '':
break
identifier, sentence = line.split('\t')
if 'ALIH' in sentence:
continue
tokenized = word_tokenize(sentence)
sentence = ' '.join(list(filter(is_valid_word, tokenized)))
sentence = sentence.replace('/', ' / ').replace(' ', ' ')
dest.write(identifier + '\t' + sentence.strip() + '\n')
i += 1
if i % 25000 == 0:
print('\rprocessed entries: {} | elapsed: {}'.format(
i, str(datetime.datetime.now() - start_time).split('.')[0]
), end='', flush=True)
src.close()
dest.close()
| 27.903226
| 70
| 0.60578
|
6b953eefe3b4e0bfa4f98f8fad0da03fa47d0688
| 78,167
|
py
|
Python
|
libs/z3/scripts/update_api.py
|
vananhnt/corana
|
882414dcb5b5a0460bf7a6c498a976195ba0e44e
|
[
"MIT"
] | 55
|
2015-08-15T22:37:02.000Z
|
2022-03-27T03:08:02.000Z
|
scripts/update_api.py
|
PhucVH888/z3
|
f7402f8b6134e366a2060571c5176c0e6ea34ddc
|
[
"MIT"
] | 10
|
2015-08-27T05:01:14.000Z
|
2020-06-29T03:53:54.000Z
|
scripts/update_api.py
|
PhucVH888/z3
|
f7402f8b6134e366a2060571c5176c0e6ea34ddc
|
[
"MIT"
] | 18
|
2015-08-11T08:37:41.000Z
|
2021-09-16T14:24:04.000Z
|
#!/usr/bin/env python
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Scripts for generating Makefiles and Visual
# Studio project files.
#
# Author: Leonardo de Moura (leonardo)
############################################
"""
This script generates the ``api_log_macros.h``,
``api_log_macros.cpp`` and ``api_commands.cpp``
files for the "api" module based on parsing
several API header files. It can also optionally
emit some of the files required for Z3's different
language bindings.
"""
import mk_util
import mk_exception
import argparse
import logging
import re
import os
import sys
##########################################################
# TODO: rewrite this file without using global variables.
# This file is a big HACK.
# It started as small simple script.
# Now, it is too big, and is invoked from mk_make.py
#
##########################################################
IN = 0
OUT = 1
INOUT = 2
IN_ARRAY = 3
OUT_ARRAY = 4
INOUT_ARRAY = 5
OUT_MANAGED_ARRAY = 6
# Primitive Types
VOID = 0
VOID_PTR = 1
INT = 2
UINT = 3
INT64 = 4
UINT64 = 5
STRING = 6
STRING_PTR = 7
BOOL = 8
SYMBOL = 9
PRINT_MODE = 10
ERROR_CODE = 11
DOUBLE = 12
FLOAT = 13
FIRST_OBJ_ID = 100
def is_obj(ty):
return ty >= FIRST_OBJ_ID
Type2Str = { VOID : 'void', VOID_PTR : 'void*', INT : 'int', UINT : 'unsigned', INT64 : 'int64_t', UINT64 : 'uint64_t', DOUBLE : 'double',
FLOAT : 'float', STRING : 'Z3_string', STRING_PTR : 'Z3_string_ptr', BOOL : 'bool', SYMBOL : 'Z3_symbol',
PRINT_MODE : 'Z3_ast_print_mode', ERROR_CODE : 'Z3_error_code'
}
Type2PyStr = { VOID_PTR : 'ctypes.c_void_p', INT : 'ctypes.c_int', UINT : 'ctypes.c_uint', INT64 : 'ctypes.c_longlong',
UINT64 : 'ctypes.c_ulonglong', DOUBLE : 'ctypes.c_double', FLOAT : 'ctypes.c_float',
STRING : 'ctypes.c_char_p', STRING_PTR : 'ctypes.POINTER(ctypes.c_char_p)', BOOL : 'ctypes.c_bool', SYMBOL : 'Symbol',
PRINT_MODE : 'ctypes.c_uint', ERROR_CODE : 'ctypes.c_uint'
}
# Mapping to .NET types
Type2Dotnet = { VOID : 'void', VOID_PTR : 'IntPtr', INT : 'int', UINT : 'uint', INT64 : 'Int64', UINT64 : 'UInt64', DOUBLE : 'double',
FLOAT : 'float', STRING : 'string', STRING_PTR : 'byte**', BOOL : 'byte', SYMBOL : 'IntPtr',
PRINT_MODE : 'uint', ERROR_CODE : 'uint' }
# Mapping to Java types
Type2Java = { VOID : 'void', VOID_PTR : 'long', INT : 'int', UINT : 'int', INT64 : 'long', UINT64 : 'long', DOUBLE : 'double',
FLOAT : 'float', STRING : 'String', STRING_PTR : 'StringPtr',
BOOL : 'boolean', SYMBOL : 'long', PRINT_MODE : 'int', ERROR_CODE : 'int'}
Type2JavaW = { VOID : 'void', VOID_PTR : 'jlong', INT : 'jint', UINT : 'jint', INT64 : 'jlong', UINT64 : 'jlong', DOUBLE : 'jdouble',
FLOAT : 'jfloat', STRING : 'jstring', STRING_PTR : 'jobject',
BOOL : 'jboolean', SYMBOL : 'jlong', PRINT_MODE : 'jint', ERROR_CODE : 'jint'}
# Mapping to ML types
Type2ML = { VOID : 'unit', VOID_PTR : 'VOIDP', INT : 'int', UINT : 'int', INT64 : 'int', UINT64 : 'int', DOUBLE : 'float',
FLOAT : 'float', STRING : 'string', STRING_PTR : 'char**',
BOOL : 'bool', SYMBOL : 'z3_symbol', PRINT_MODE : 'int', ERROR_CODE : 'int' }
next_type_id = FIRST_OBJ_ID
def def_Type(var, c_type, py_type):
global next_type_id
exec('%s = %s' % (var, next_type_id), globals())
Type2Str[next_type_id] = c_type
Type2PyStr[next_type_id] = py_type
next_type_id = next_type_id + 1
def def_Types(api_files):
pat1 = re.compile(" *def_Type\(\'(.*)\',[^\']*\'(.*)\',[^\']*\'(.*)\'\)[ \t]*")
for api_file in api_files:
api = open(api_file, 'r')
for line in api:
m = pat1.match(line)
if m:
def_Type(m.group(1), m.group(2), m.group(3))
for k in Type2Str:
v = Type2Str[k]
if is_obj(k):
Type2Dotnet[k] = v
Type2ML[k] = v.lower()
def type2str(ty):
global Type2Str
return Type2Str[ty]
def type2pystr(ty):
global Type2PyStr
return Type2PyStr[ty]
def type2dotnet(ty):
global Type2Dotnet
return Type2Dotnet[ty]
def type2java(ty):
global Type2Java
if (ty >= FIRST_OBJ_ID):
return 'long'
else:
return Type2Java[ty]
def type2javaw(ty):
global Type2JavaW
if (ty >= FIRST_OBJ_ID):
return 'jlong'
else:
return Type2JavaW[ty]
def type2ml(ty):
global Type2ML
q = Type2ML[ty]
if q[0:3] == 'z3_':
return q[3:]
else:
        return q
def _in(ty):
return (IN, ty)
def _in_array(sz, ty):
return (IN_ARRAY, ty, sz)
def _out(ty):
return (OUT, ty)
def _out_array(sz, ty):
return (OUT_ARRAY, ty, sz, sz)
# cap contains the position of the argument that stores the capacity of the array
# sz contains the position of the output argument that stores the (real) size of the array
def _out_array2(cap, sz, ty):
return (OUT_ARRAY, ty, cap, sz)
def _inout_array(sz, ty):
return (INOUT_ARRAY, ty, sz, sz)
def _out_managed_array(sz,ty):
return (OUT_MANAGED_ARRAY, ty, 0, sz)
def param_kind(p):
return p[0]
def param_type(p):
return p[1]
def param_array_capacity_pos(p):
return p[2]
def param_array_size_pos(p):
return p[3]
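# Illustration (not copied from the API headers): an API declaration in the
# z3_api.h doc comments combines the helpers above roughly like
#     def_API('Z3_mk_app', AST,
#             (_in(CONTEXT), _in(FUNC_DECL), _in(UINT), _in_array(2, AST)))
# where the 2 passed to _in_array is the position of the UINT argument holding
# the array length, matching the cap/sz positions described earlier.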
def param2str(p):
if param_kind(p) == IN_ARRAY:
return "%s const *" % type2str(param_type(p))
elif param_kind(p) == OUT_ARRAY or param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY:
return "%s*" % type2str(param_type(p))
elif param_kind(p) == OUT:
return "%s*" % type2str(param_type(p))
else:
return type2str(param_type(p))
def param2dotnet(p):
k = param_kind(p)
if k == OUT:
if param_type(p) == STRING:
return "out IntPtr"
else:
return "[In, Out] ref %s" % type2dotnet(param_type(p))
elif k == IN_ARRAY:
return "[In] %s[]" % type2dotnet(param_type(p))
elif k == INOUT_ARRAY:
return "[In, Out] %s[]" % type2dotnet(param_type(p))
elif k == OUT_ARRAY:
return "[Out] %s[]" % type2dotnet(param_type(p))
elif k == OUT_MANAGED_ARRAY:
return "[Out] out %s[]" % type2dotnet(param_type(p))
else:
return type2dotnet(param_type(p))
def param2java(p):
k = param_kind(p)
if k == OUT:
if param_type(p) == INT or param_type(p) == UINT:
return "IntPtr"
elif param_type(p) == INT64 or param_type(p) == UINT64 or param_type(p) == VOID_PTR or param_type(p) >= FIRST_OBJ_ID:
return "LongPtr"
elif param_type(p) == STRING:
return "StringPtr"
else:
print("ERROR: unreachable code")
assert(False)
exit(1)
elif k == IN_ARRAY or k == INOUT_ARRAY or k == OUT_ARRAY:
return "%s[]" % type2java(param_type(p))
elif k == OUT_MANAGED_ARRAY:
if param_type(p) == UINT:
return "UIntArrayPtr"
else:
return "ObjArrayPtr"
else:
return type2java(param_type(p))
def param2javaw(p):
k = param_kind(p)
if k == OUT:
return "jobject"
elif k == IN_ARRAY or k == INOUT_ARRAY or k == OUT_ARRAY:
if param_type(p) == INT or param_type(p) == UINT or param_type(p) == BOOL:
return "jintArray"
else:
return "jlongArray"
elif k == OUT_MANAGED_ARRAY:
return "jlong"
else:
return type2javaw(param_type(p))
def param2pystr(p):
if param_kind(p) == IN_ARRAY or param_kind(p) == OUT_ARRAY or param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY or param_kind(p) == OUT:
return "ctypes.POINTER(%s)" % type2pystr(param_type(p))
else:
return type2pystr(param_type(p))
def param2ml(p):
k = param_kind(p)
if k == OUT:
if param_type(p) == INT or param_type(p) == UINT or param_type(p) == BOOL or param_type(p) == INT64 or param_type(p) == UINT64:
return "int"
elif param_type(p) == STRING:
return "string"
else:
return "ptr"
elif k == IN_ARRAY or k == INOUT_ARRAY or k == OUT_ARRAY:
return "%s list" % type2ml(param_type(p))
elif k == OUT_MANAGED_ARRAY:
return "%s list" % type2ml(param_type(p))
else:
return type2ml(param_type(p))
# Save name, result, params to generate wrapper
_API2PY = []
def mk_py_binding(name, result, params):
global core_py
global _API2PY
_API2PY.append((name, result, params))
if result != VOID:
core_py.write("_lib.%s.restype = %s\n" % (name, type2pystr(result)))
core_py.write("_lib.%s.argtypes = [" % name)
first = True
for p in params:
if first:
first = False
else:
core_py.write(", ")
core_py.write(param2pystr(p))
core_py.write("]\n")
def extra_API(name, result, params):
mk_py_binding(name, result, params)
reg_dotnet(name, result, params)
def display_args(num):
for i in range(num):
if i > 0:
core_py.write(", ")
core_py.write("a%s" % i)
def display_args_to_z3(params):
i = 0
for p in params:
if i > 0:
core_py.write(", ")
if param_type(p) == STRING:
core_py.write("_to_ascii(a%s)" % i)
else:
core_py.write("a%s" % i)
i = i + 1
NULLWrapped = [ 'Z3_mk_context', 'Z3_mk_context_rc' ]
Unwrapped = [ 'Z3_del_context', 'Z3_get_error_code' ]
def mk_py_wrappers():
core_py.write("""
class Elementaries:
def __init__(self, f):
self.f = f
self.get_error_code = _lib.Z3_get_error_code
self.get_error_message = _lib.Z3_get_error_msg
self.OK = Z3_OK
self.Exception = Z3Exception
def Check(self, ctx):
err = self.get_error_code(ctx)
if err != self.OK:
raise self.Exception(self.get_error_message(ctx, err))
def Z3_set_error_handler(ctx, hndlr, _elems=Elementaries(_lib.Z3_set_error_handler)):
ceh = _error_handler_type(hndlr)
_elems.f(ctx, ceh)
_elems.Check(ctx)
return ceh
""")
for sig in _API2PY:
mk_py_wrapper_single(sig)
if sig[1] == STRING:
mk_py_wrapper_single(sig, decode_string=False)
def mk_py_wrapper_single(sig, decode_string=True):
name = sig[0]
result = sig[1]
params = sig[2]
num = len(params)
def_name = name
if not decode_string:
def_name += '_bytes'
core_py.write("def %s(" % def_name)
display_args(num)
comma = ", " if num != 0 else ""
core_py.write("%s_elems=Elementaries(_lib.%s)):\n" % (comma, name))
lval = "r = " if result != VOID else ""
core_py.write(" %s_elems.f(" % lval)
display_args_to_z3(params)
core_py.write(")\n")
if len(params) > 0 and param_type(params[0]) == CONTEXT and not name in Unwrapped:
core_py.write(" _elems.Check(a0)\n")
if result == STRING and decode_string:
core_py.write(" return _to_pystr(r)\n")
elif result != VOID:
core_py.write(" return r\n")
core_py.write("\n")
## .NET API native interface
_dotnet_decls = []
def reg_dotnet(name, result, params):
global _dotnet_decls
_dotnet_decls.append((name, result, params))
def mk_dotnet(dotnet):
global Type2Str
dotnet.write('// Automatically generated file\n')
dotnet.write('using System;\n')
dotnet.write('using System.Collections.Generic;\n')
dotnet.write('using System.Text;\n')
dotnet.write('using System.Runtime.InteropServices;\n\n')
dotnet.write('#pragma warning disable 1591\n\n')
dotnet.write('namespace Microsoft.Z3\n')
dotnet.write('{\n')
for k in Type2Str:
v = Type2Str[k]
if is_obj(k):
dotnet.write(' using %s = System.IntPtr;\n' % v)
dotnet.write('\n')
dotnet.write(' public class Native\n')
dotnet.write(' {\n\n')
dotnet.write(' [UnmanagedFunctionPointer(CallingConvention.Cdecl)]\n')
dotnet.write(' public delegate void Z3_error_handler(Z3_context c, Z3_error_code e);\n\n')
dotnet.write(' public class LIB\n')
dotnet.write(' {\n')
dotnet.write(' const string Z3_DLL_NAME = \"libz3\";\n'
' \n')
dotnet.write(' [DllImport(Z3_DLL_NAME, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]\n')
dotnet.write(' public extern static void Z3_set_error_handler(Z3_context a0, Z3_error_handler a1);\n\n')
for name, result, params in _dotnet_decls:
dotnet.write(' [DllImport(Z3_DLL_NAME, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]\n')
dotnet.write(' ')
if result == STRING:
dotnet.write('public extern static IntPtr %s(' % (name))
else:
dotnet.write('public extern static %s %s(' % (type2dotnet(result), name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
dotnet.write('%s a%d' % (param2dotnet(param), i))
i = i + 1
dotnet.write(');\n\n')
dotnet.write(' }\n')
def mk_dotnet_wrappers(dotnet):
global Type2Str
dotnet.write("\n")
dotnet.write(" public static void Z3_set_error_handler(Z3_context a0, Z3_error_handler a1) {\n")
dotnet.write(" LIB.Z3_set_error_handler(a0, a1);\n")
dotnet.write(" Z3_error_code err = (Z3_error_code)LIB.Z3_get_error_code(a0);\n")
dotnet.write(" if (err != Z3_error_code.Z3_OK)\n")
dotnet.write(" throw new Z3Exception(Marshal.PtrToStringAnsi(LIB.Z3_get_error_msg(a0, (uint)err)));\n")
dotnet.write(" }\n\n")
for name, result, params in _dotnet_decls:
if result == STRING:
dotnet.write(' public static string %s(' % (name))
else:
dotnet.write(' public static %s %s(' % (type2dotnet(result), name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
dotnet.write('%s a%d' % (param2dotnet(param), i))
i = i + 1
dotnet.write(') {\n')
dotnet.write(' ')
if result == STRING:
dotnet.write('IntPtr r = ')
elif result != VOID:
dotnet.write('%s r = ' % type2dotnet(result))
dotnet.write('LIB.%s(' % (name))
first = True
i = 0
for param in params:
if first:
first = False
else:
dotnet.write(', ')
if param_kind(param) == OUT:
if param_type(param) == STRING:
dotnet.write('out ')
else:
dotnet.write('ref ')
elif param_kind(param) == OUT_MANAGED_ARRAY:
dotnet.write('out ')
dotnet.write('a%d' % i)
i = i + 1
dotnet.write(');\n')
if name not in Unwrapped:
if name in NULLWrapped:
dotnet.write(" if (r == IntPtr.Zero)\n")
dotnet.write(" throw new Z3Exception(\"Object allocation failed.\");\n")
else:
if len(params) > 0 and param_type(params[0]) == CONTEXT:
dotnet.write(" Z3_error_code err = (Z3_error_code)LIB.Z3_get_error_code(a0);\n")
dotnet.write(" if (err != Z3_error_code.Z3_OK)\n")
dotnet.write(" throw new Z3Exception(Marshal.PtrToStringAnsi(LIB.Z3_get_error_msg(a0, (uint)err)));\n")
if result == STRING:
dotnet.write(" return Marshal.PtrToStringAnsi(r);\n")
elif result != VOID:
dotnet.write(" return r;\n")
dotnet.write(" }\n\n")
dotnet.write(" }\n\n")
dotnet.write("}\n\n")
def java_method_name(name):
result = ''
name = name[3:] # Remove Z3_
n = len(name)
i = 0
while i < n:
if name[i] == '_':
i = i + 1
if i < n:
result += name[i].upper()
else:
result += name[i]
i = i + 1
return result
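# Example of the conversion above: java_method_name('Z3_mk_context_rc') -> 'mkContextRc'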
# Return the type of the java array elements
def java_array_element_type(p):
if param_type(p) == INT or param_type(p) == UINT or param_type(p) == BOOL:
return 'jint'
else:
return 'jlong'
def mk_java(java_dir, package_name):
java_nativef = os.path.join(java_dir, 'Native.java')
java_wrapperf = os.path.join(java_dir, 'Native.cpp')
java_native = open(java_nativef, 'w')
java_native.write('// Automatically generated file\n')
java_native.write('package %s;\n' % package_name)
java_native.write('import %s.enumerations.*;\n' % package_name)
java_native.write('public final class Native {\n')
java_native.write(' public static class IntPtr { public int value; }\n')
java_native.write(' public static class LongPtr { public long value; }\n')
java_native.write(' public static class StringPtr { public String value; }\n')
java_native.write(' public static class ObjArrayPtr { public long[] value; }\n')
java_native.write(' public static class UIntArrayPtr { public int[] value; }\n')
java_native.write(' public static native void setInternalErrorHandler(long ctx);\n\n')
java_native.write(' static {\n')
java_native.write(' try { System.loadLibrary("z3java"); }\n')
java_native.write(' catch (UnsatisfiedLinkError ex) { System.loadLibrary("libz3java"); }\n')
java_native.write(' }\n')
java_native.write('\n')
for name, result, params in _dotnet_decls:
java_native.write(' protected static native %s INTERNAL%s(' % (type2java(result), java_method_name(name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
java_native.write(', ')
java_native.write('%s a%d' % (param2java(param), i))
i = i + 1
java_native.write(');\n')
java_native.write('\n\n')
# Exception wrappers
for name, result, params in _dotnet_decls:
java_native.write(' public static %s %s(' % (type2java(result), java_method_name(name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
java_native.write(', ')
java_native.write('%s a%d' % (param2java(param), i))
i = i + 1
java_native.write(')')
if (len(params) > 0 and param_type(params[0]) == CONTEXT) or name in NULLWrapped:
java_native.write(' throws Z3Exception')
java_native.write('\n')
java_native.write(' {\n')
java_native.write(' ')
if result != VOID:
java_native.write('%s res = ' % type2java(result))
java_native.write('INTERNAL%s(' % (java_method_name(name)))
first = True
i = 0
for param in params:
if first:
first = False
else:
java_native.write(', ')
java_native.write('a%d' % i)
i = i + 1
java_native.write(');\n')
if name not in Unwrapped:
if name in NULLWrapped:
java_native.write(" if (res == 0)\n")
java_native.write(" throw new Z3Exception(\"Object allocation failed.\");\n")
else:
if len(params) > 0 and param_type(params[0]) == CONTEXT:
java_native.write(' Z3_error_code err = Z3_error_code.fromInt(INTERNALgetErrorCode(a0));\n')
java_native.write(' if (err != Z3_error_code.Z3_OK)\n')
java_native.write(' throw new Z3Exception(INTERNALgetErrorMsg(a0, err.toInt()));\n')
if result != VOID:
java_native.write(' return res;\n')
java_native.write(' }\n\n')
java_native.write('}\n')
java_wrapper = open(java_wrapperf, 'w')
pkg_str = package_name.replace('.', '_')
java_wrapper.write('// Automatically generated file\n')
java_wrapper.write('#include<jni.h>\n')
java_wrapper.write('#include<stdlib.h>\n')
java_wrapper.write('#include"z3.h"\n')
java_wrapper.write('#ifdef __cplusplus\n')
java_wrapper.write('extern "C" {\n')
java_wrapper.write('#endif\n\n')
java_wrapper.write('#ifdef __GNUC__\n#if __GNUC__ >= 4\n#define DLL_VIS __attribute__ ((visibility ("default")))\n#else\n#define DLL_VIS\n#endif\n#else\n#define DLL_VIS\n#endif\n\n')
java_wrapper.write('#if defined(__LP64__) || defined(_WIN64)\n\n')
java_wrapper.write('#define GETLONGAELEMS(T,OLD,NEW) \\\n')
java_wrapper.write(' T * NEW = (OLD == 0) ? 0 : (T*) jenv->GetLongArrayElements(OLD, NULL);\n')
java_wrapper.write('#define RELEASELONGAELEMS(OLD,NEW) \\\n')
java_wrapper.write(' if (OLD != 0) jenv->ReleaseLongArrayElements(OLD, (jlong *) NEW, JNI_ABORT); \n\n')
java_wrapper.write('#define GETLONGAREGION(T,OLD,Z,SZ,NEW) \\\n')
java_wrapper.write(' jenv->GetLongArrayRegion(OLD,Z,(jsize)SZ,(jlong*)NEW); \n')
java_wrapper.write('#define SETLONGAREGION(OLD,Z,SZ,NEW) \\\n')
java_wrapper.write(' jenv->SetLongArrayRegion(OLD,Z,(jsize)SZ,(jlong*)NEW) \n\n')
java_wrapper.write('#else\n\n')
java_wrapper.write('#define GETLONGAELEMS(T,OLD,NEW) \\\n')
java_wrapper.write(' T * NEW = 0; { \\\n')
java_wrapper.write(' jlong * temp = (OLD == 0) ? 0 : jenv->GetLongArrayElements(OLD, NULL); \\\n')
java_wrapper.write(' unsigned int size = (OLD == 0) ? 0 :jenv->GetArrayLength(OLD); \\\n')
java_wrapper.write(' if (OLD != 0) { \\\n')
java_wrapper.write(' NEW = (T*) (new int[size]); \\\n')
java_wrapper.write(' for (unsigned i=0; i < size; i++) \\\n')
java_wrapper.write(' NEW[i] = reinterpret_cast<T>(temp[i]); \\\n')
java_wrapper.write(' jenv->ReleaseLongArrayElements(OLD, temp, JNI_ABORT); \\\n')
java_wrapper.write(' } \\\n')
java_wrapper.write(' } \n\n')
java_wrapper.write('#define RELEASELONGAELEMS(OLD,NEW) \\\n')
java_wrapper.write(' delete [] NEW; \n\n')
java_wrapper.write('#define GETLONGAREGION(T,OLD,Z,SZ,NEW) \\\n')
java_wrapper.write(' { \\\n')
java_wrapper.write(' jlong * temp = new jlong[SZ]; \\\n')
java_wrapper.write(' jenv->GetLongArrayRegion(OLD,Z,(jsize)SZ,(jlong*)temp); \\\n')
java_wrapper.write(' for (int i = 0; i < (SZ); i++) \\\n')
java_wrapper.write(' NEW[i] = reinterpret_cast<T>(temp[i]); \\\n')
java_wrapper.write(' delete [] temp; \\\n')
java_wrapper.write(' }\n\n')
java_wrapper.write('#define SETLONGAREGION(OLD,Z,SZ,NEW) \\\n')
java_wrapper.write(' { \\\n')
java_wrapper.write(' jlong * temp = new jlong[SZ]; \\\n')
java_wrapper.write(' for (int i = 0; i < (SZ); i++) \\\n')
java_wrapper.write(' temp[i] = reinterpret_cast<jlong>(NEW[i]); \\\n')
java_wrapper.write(' jenv->SetLongArrayRegion(OLD,Z,(jsize)SZ,temp); \\\n')
java_wrapper.write(' delete [] temp; \\\n')
java_wrapper.write(' }\n\n')
java_wrapper.write('#endif\n\n')
java_wrapper.write('void Z3JavaErrorHandler(Z3_context c, Z3_error_code e)\n')
java_wrapper.write('{\n')
java_wrapper.write(' // Internal do-nothing error handler. This is required to avoid that Z3 calls exit()\n')
java_wrapper.write(' // upon errors, but the actual error handling is done by throwing exceptions in the\n')
java_wrapper.write(' // wrappers below.\n')
java_wrapper.write('}\n\n')
java_wrapper.write('DLL_VIS JNIEXPORT void JNICALL Java_%s_Native_setInternalErrorHandler(JNIEnv * jenv, jclass cls, jlong a0)\n' % pkg_str)
java_wrapper.write('{\n')
java_wrapper.write(' Z3_set_error_handler((Z3_context)a0, Z3JavaErrorHandler);\n')
java_wrapper.write('}\n\n')
java_wrapper.write('')
for name, result, params in _dotnet_decls:
java_wrapper.write('DLL_VIS JNIEXPORT %s JNICALL Java_%s_Native_INTERNAL%s(JNIEnv * jenv, jclass cls' % (type2javaw(result), pkg_str, java_method_name(name)))
i = 0
for param in params:
java_wrapper.write(', ')
java_wrapper.write('%s a%d' % (param2javaw(param), i))
i = i + 1
java_wrapper.write(') {\n')
# preprocess arrays, strings, in/out arguments
i = 0
for param in params:
k = param_kind(param)
if k == OUT or k == INOUT:
java_wrapper.write(' %s _a%s;\n' % (type2str(param_type(param)), i))
elif k == IN_ARRAY or k == INOUT_ARRAY:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' %s * _a%s = (%s*) jenv->GetIntArrayElements(a%s, NULL);\n' % (type2str(param_type(param)), i, type2str(param_type(param)), i))
else:
java_wrapper.write(' GETLONGAELEMS(%s, a%s, _a%s);\n' % (type2str(param_type(param)), i, i))
elif k == OUT_ARRAY:
java_wrapper.write(' %s * _a%s = (%s *) malloc(((unsigned)a%s) * sizeof(%s));\n' % (type2str(param_type(param)),
i,
type2str(param_type(param)),
param_array_capacity_pos(param),
type2str(param_type(param))))
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' jenv->GetIntArrayRegion(a%s, 0, (jsize)a%s, (jint*)_a%s);\n' % (i, param_array_capacity_pos(param), i))
else:
java_wrapper.write(' GETLONGAREGION(%s, a%s, 0, a%s, _a%s);\n' % (type2str(param_type(param)), i, param_array_capacity_pos(param), i))
elif k == IN and param_type(param) == STRING:
java_wrapper.write(' Z3_string _a%s = (Z3_string) jenv->GetStringUTFChars(a%s, NULL);\n' % (i, i))
elif k == OUT_MANAGED_ARRAY:
java_wrapper.write(' %s * _a%s = 0;\n' % (type2str(param_type(param)), i))
i = i + 1
# invoke procedure
java_wrapper.write(' ')
if result != VOID:
java_wrapper.write('%s result = ' % type2str(result))
java_wrapper.write('%s(' % name)
i = 0
first = True
for param in params:
if first:
first = False
else:
java_wrapper.write(', ')
k = param_kind(param)
if k == OUT or k == INOUT:
java_wrapper.write('&_a%s' % i)
elif k == OUT_ARRAY or k == IN_ARRAY or k == INOUT_ARRAY:
java_wrapper.write('_a%s' % i)
elif k == OUT_MANAGED_ARRAY:
java_wrapper.write('&_a%s' % i)
elif k == IN and param_type(param) == STRING:
java_wrapper.write('_a%s' % i)
else:
java_wrapper.write('(%s)a%i' % (param2str(param), i))
i = i + 1
java_wrapper.write(');\n')
# cleanup
i = 0
for param in params:
k = param_kind(param)
if k == OUT_ARRAY:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' jenv->SetIntArrayRegion(a%s, 0, (jsize)a%s, (jint*)_a%s);\n' % (i, param_array_capacity_pos(param), i))
else:
java_wrapper.write(' SETLONGAREGION(a%s, 0, a%s, _a%s);\n' % (i, param_array_capacity_pos(param), i))
java_wrapper.write(' free(_a%s);\n' % i)
elif k == IN_ARRAY or k == OUT_ARRAY:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' jenv->ReleaseIntArrayElements(a%s, (jint*)_a%s, JNI_ABORT);\n' % (i, i))
else:
java_wrapper.write(' RELEASELONGAELEMS(a%s, _a%s);\n' % (i, i))
elif k == OUT or k == INOUT:
if param_type(param) == INT or param_type(param) == UINT or param_type(param) == BOOL:
java_wrapper.write(' {\n')
java_wrapper.write(' jclass mc = jenv->GetObjectClass(a%s);\n' % i)
java_wrapper.write(' jfieldID fid = jenv->GetFieldID(mc, "value", "I");\n')
java_wrapper.write(' jenv->SetIntField(a%s, fid, (jint) _a%s);\n' % (i, i))
java_wrapper.write(' }\n')
else:
java_wrapper.write(' {\n')
java_wrapper.write(' jclass mc = jenv->GetObjectClass(a%s);\n' % i)
java_wrapper.write(' jfieldID fid = jenv->GetFieldID(mc, "value", "J");\n')
java_wrapper.write(' jenv->SetLongField(a%s, fid, (jlong) _a%s);\n' % (i, i))
java_wrapper.write(' }\n')
elif k == OUT_MANAGED_ARRAY:
java_wrapper.write(' *(jlong**)a%s = (jlong*)_a%s;\n' % (i, i))
elif k == IN and param_type(param) == STRING:
java_wrapper.write(' jenv->ReleaseStringUTFChars(a%s, _a%s);\n' % (i, i));
i = i + 1
# return
if result == STRING:
java_wrapper.write(' return jenv->NewStringUTF(result);\n')
elif result != VOID:
java_wrapper.write(' return (%s) result;\n' % type2javaw(result))
java_wrapper.write('}\n')
java_wrapper.write('#ifdef __cplusplus\n')
java_wrapper.write('}\n')
java_wrapper.write('#endif\n')
if mk_util.is_verbose():
print("Generated '%s'" % java_nativef)
Type2Napi = { VOID : '', VOID_PTR : '', INT : 'number', UINT : 'number', INT64 : 'number', UINT64 : 'number', DOUBLE : 'number',
FLOAT : 'number', STRING : 'string', STRING_PTR : 'array',
BOOL : 'number', SYMBOL : 'external', PRINT_MODE : 'number', ERROR_CODE : 'number' }
def type2napi(t):
try:
return Type2Napi[t]
except:
return "external"
Type2NapiBuilder = { VOID : '', VOID_PTR : '', INT : 'int32', UINT : 'uint32', INT64 : 'int64', UINT64 : 'uint64', DOUBLE : 'double',
FLOAT : 'float', STRING : 'string', STRING_PTR : 'array',
BOOL : 'bool', SYMBOL : 'external', PRINT_MODE : 'int32', ERROR_CODE : 'int32' }
def type2napibuilder(t):
try:
return Type2NapiBuilder[t]
except:
return "external"
def mk_js(js_output_dir):
with open(os.path.join(js_output_dir, "z3.json"), 'w') as ous:
ous.write("{\n")
ous.write(" \"api\": [\n")
for name, result, params in _dotnet_decls:
ous.write(" {\n")
ous.write(" \"name\": \"%s\",\n" % name)
ous.write(" \"c_type\": \"%s\",\n" % Type2Str[result])
ous.write(" \"napi_type\": \"%s\",\n" % type2napi(result))
ous.write(" \"arg_list\": [")
first = True
for p in params:
if first:
first = False
ous.write("\n {\n")
else:
ous.write(",\n {\n")
t = param_type(p)
k = t
ous.write(" \"name\": \"%s\",\n" % "") # TBD
ous.write(" \"c_type\": \"%s\",\n" % type2str(t))
ous.write(" \"napi_type\": \"%s\",\n" % type2napi(t))
ous.write(" \"napi_builder\": \"%s\"\n" % type2napibuilder(t))
ous.write( " }")
ous.write("],\n")
ous.write(" \"napi_builder\": \"%s\"\n" % type2napibuilder(result))
ous.write(" },\n")
ous.write(" ]\n")
ous.write("}\n")
def mk_log_header(file, name, params):
file.write("void log_%s(" % name)
i = 0
for p in params:
if i > 0:
file.write(", ")
file.write("%s a%s" % (param2str(p), i))
i = i + 1
file.write(")")
def log_param(p):
kind = param_kind(p)
ty = param_type(p)
return is_obj(ty) and (kind == OUT or kind == INOUT or kind == OUT_ARRAY or kind == INOUT_ARRAY)
def log_result(result, params):
for p in params:
if log_param(p):
return True
return False
def mk_log_macro(file, name, params):
file.write("#define LOG_%s(" % name)
i = 0
for p in params:
if i > 0:
file.write(", ")
file.write("_ARG%s" % i)
i = i + 1
file.write(") z3_log_ctx _LOG_CTX; ")
auxs = set()
i = 0
for p in params:
if log_param(p):
kind = param_kind(p)
if kind == OUT_ARRAY or kind == INOUT_ARRAY:
cap = param_array_capacity_pos(p)
if cap not in auxs:
auxs.add(cap)
file.write("unsigned _Z3_UNUSED Z3ARG%s = 0; " % cap)
sz = param_array_size_pos(p)
if sz not in auxs:
auxs.add(sz)
file.write("unsigned * _Z3_UNUSED Z3ARG%s = 0; " % sz)
file.write("%s _Z3_UNUSED Z3ARG%s = 0; " % (param2str(p), i))
i = i + 1
file.write("if (_LOG_CTX.enabled()) { log_%s(" % name)
i = 0
for p in params:
if (i > 0):
file.write(', ')
file.write("_ARG%s" %i)
i = i + 1
file.write("); ")
auxs = set()
i = 0
for p in params:
if log_param(p):
kind = param_kind(p)
if kind == OUT_ARRAY or kind == INOUT_ARRAY:
cap = param_array_capacity_pos(p)
if cap not in auxs:
auxs.add(cap)
file.write("Z3ARG%s = _ARG%s; " % (cap, cap))
sz = param_array_size_pos(p)
if sz not in auxs:
auxs.add(sz)
file.write("Z3ARG%s = _ARG%s; " % (sz, sz))
file.write("Z3ARG%s = _ARG%s; " % (i, i))
i = i + 1
file.write("}\n")
def mk_log_result_macro(file, name, result, params):
file.write("#define RETURN_%s" % name)
if is_obj(result):
file.write("(Z3RES)")
file.write(" ")
file.write("if (_LOG_CTX.enabled()) { ")
if is_obj(result):
file.write("SetR(Z3RES); ")
i = 0
for p in params:
if log_param(p):
kind = param_kind(p)
if kind == OUT_ARRAY or kind == INOUT_ARRAY:
cap = param_array_capacity_pos(p)
sz = param_array_size_pos(p)
if cap == sz:
file.write("for (unsigned i = 0; i < Z3ARG%s; i++) { SetAO(Z3ARG%s[i], %s, i); } " % (sz, i, i))
else:
file.write("for (unsigned i = 0; Z3ARG%s && i < *Z3ARG%s; i++) { SetAO(Z3ARG%s[i], %s, i); } " % (sz, sz, i, i))
if kind == OUT or kind == INOUT:
file.write("SetO((Z3ARG%s == 0 ? 0 : *Z3ARG%s), %s); " % (i, i, i))
i = i + 1
file.write("} ")
if is_obj(result):
file.write("return Z3RES\n")
else:
file.write("return\n")
def mk_exec_header(file, name):
file.write("void exec_%s(z3_replayer & in)" % name)
def error(msg):
sys.stderr.write(msg)
exit(-1)
next_id = 0
API2Id = {}
def def_API(name, result, params):
global API2Id, next_id
global log_h, log_c
mk_py_binding(name, result, params)
reg_dotnet(name, result, params)
API2Id[next_id] = name
mk_log_header(log_h, name, params)
log_h.write(';\n')
mk_log_header(log_c, name, params)
log_c.write(' {\n R();\n')
mk_exec_header(exe_c, name)
exe_c.write(' {\n')
# Create Log function & Function call
i = 0
exe_c.write(" ")
if is_obj(result):
exe_c.write("%s result = " % type2str(result))
exe_c.write("%s(\n " % name)
for p in params:
kind = param_kind(p)
ty = param_type(p)
if (i > 0):
exe_c.write(",\n ")
if kind == IN:
if is_obj(ty):
log_c.write(" P(a%s);\n" % i)
exe_c.write("reinterpret_cast<%s>(in.get_obj(%s))" % (param2str(p), i))
elif ty == STRING:
log_c.write(" S(a%s);\n" % i)
exe_c.write("in.get_str(%s)" % i)
elif ty == SYMBOL:
log_c.write(" Sy(a%s);\n" % i)
exe_c.write("in.get_symbol(%s)" % i)
elif ty == UINT:
log_c.write(" U(a%s);\n" % i)
exe_c.write("in.get_uint(%s)" % i)
elif ty == UINT64:
log_c.write(" U(a%s);\n" % i)
exe_c.write("in.get_uint64(%s)" % i)
elif ty == INT:
log_c.write(" I(a%s);\n" % i)
exe_c.write("in.get_int(%s)" % i)
elif ty == INT64:
log_c.write(" I(a%s);\n" % i)
exe_c.write("in.get_int64(%s)" % i)
elif ty == DOUBLE:
log_c.write(" D(a%s);\n" % i)
exe_c.write("in.get_double(%s)" % i)
elif ty == FLOAT:
log_c.write(" D(a%s);\n" % i)
exe_c.write("in.get_float(%s)" % i)
elif ty == BOOL:
log_c.write(" I(a%s);\n" % i)
exe_c.write("in.get_bool(%s)" % i)
elif ty == PRINT_MODE or ty == ERROR_CODE:
log_c.write(" U(static_cast<unsigned>(a%s));\n" % i)
exe_c.write("static_cast<%s>(in.get_uint(%s))" % (type2str(ty), i))
else:
error("unsupported parameter for %s, %s" % (name, p))
elif kind == INOUT:
error("unsupported parameter for %s, %s" % (name, p))
elif kind == OUT:
if is_obj(ty):
log_c.write(" P(0);\n")
exe_c.write("reinterpret_cast<%s>(in.get_obj_addr(%s))" % (param2str(p), i))
elif ty == STRING:
log_c.write(" S(\"\");\n")
exe_c.write("in.get_str_addr(%s)" % i)
elif ty == UINT:
log_c.write(" U(0);\n")
exe_c.write("in.get_uint_addr(%s)" % i)
elif ty == UINT64:
log_c.write(" U(0);\n")
exe_c.write("in.get_uint64_addr(%s)" % i)
elif ty == INT:
log_c.write(" I(0);\n")
exe_c.write("in.get_int_addr(%s)" % i)
elif ty == INT64:
log_c.write(" I(0);\n")
exe_c.write("in.get_int64_addr(%s)" % i)
elif ty == VOID_PTR:
log_c.write(" P(0);\n")
exe_c.write("in.get_obj_addr(%s)" % i)
else:
error("unsupported parameter for %s, %s" % (name, p))
elif kind == IN_ARRAY or kind == INOUT_ARRAY:
sz = param_array_capacity_pos(p)
log_c.write(" for (unsigned i = 0; i < a%s; i++) { " % sz)
if is_obj(ty):
log_c.write("P(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Ap(a%s);\n" % sz)
exe_c.write("reinterpret_cast<%s*>(in.get_obj_array(%s))" % (type2str(ty), i))
elif ty == SYMBOL:
log_c.write("Sy(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Asy(a%s);\n" % sz)
exe_c.write("in.get_symbol_array(%s)" % i)
elif ty == UINT:
log_c.write("U(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Au(a%s);\n" % sz)
exe_c.write("in.get_uint_array(%s)" % i)
elif ty == INT:
log_c.write("I(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Ai(a%s);\n" % sz)
exe_c.write("in.get_int_array(%s)" % i)
elif ty == BOOL:
log_c.write("U(a%s[i]);" % i)
log_c.write(" }\n")
log_c.write(" Au(a%s);\n" % sz)
exe_c.write("in.get_bool_array(%s)" % i)
else:
error ("unsupported parameter for %s, %s, %s" % (ty, name, p))
elif kind == OUT_ARRAY:
sz = param_array_capacity_pos(p)
sz_p = params[sz]
sz_p_k = param_kind(sz_p)
tstr = type2str(ty)
if sz_p_k == OUT or sz_p_k == INOUT:
sz_e = ("(*a%s)" % sz)
else:
sz_e = ("a%s" % sz)
log_c.write(" for (unsigned i = 0; i < %s; i++) { " % sz_e)
if is_obj(ty):
log_c.write("P(0);")
log_c.write(" }\n")
log_c.write(" Ap(%s);\n" % sz_e)
exe_c.write("reinterpret_cast<%s*>(in.get_obj_array(%s))" % (tstr, i))
elif ty == UINT:
log_c.write("U(0);")
log_c.write(" }\n")
log_c.write(" Au(%s);\n" % sz_e)
exe_c.write("in.get_uint_array(%s)" % i)
else:
error ("unsupported parameter for %s, %s" % (name, p))
elif kind == OUT_MANAGED_ARRAY:
sz = param_array_size_pos(p)
sz_p = params[sz]
sz_p_k = param_kind(sz_p)
tstr = type2str(ty)
if sz_p_k == OUT or sz_p_k == INOUT:
sz_e = ("(*a%s)" % sz)
else:
sz_e = ("a%s" % sz)
log_c.write(" for (unsigned i = 0; i < %s; i++) { " % sz_e)
log_c.write("P(0);")
log_c.write(" }\n")
log_c.write(" Ap(%s);\n" % sz_e)
exe_c.write("reinterpret_cast<%s**>(in.get_obj_array(%s))" % (tstr, i))
else:
error ("unsupported parameter for %s, %s" % (name, p))
i = i + 1
log_c.write(" C(%s);\n" % next_id)
exe_c.write(");\n")
if is_obj(result):
exe_c.write(" in.store_result(result);\n")
if name == 'Z3_mk_context' or name == 'Z3_mk_context_rc':
exe_c.write(" Z3_set_error_handler(result, Z3_replayer_error_handler);")
log_c.write('}\n')
exe_c.write('}\n')
mk_log_macro(log_h, name, params)
if log_result(result, params):
mk_log_result_macro(log_h, name, result, params)
next_id = next_id + 1
def mk_bindings(exe_c):
exe_c.write("void register_z3_replayer_cmds(z3_replayer & in) {\n")
for key, val in API2Id.items():
exe_c.write(" in.register_cmd(%s, exec_%s, \"%s\");\n" % (key, val, val))
exe_c.write("}\n")
def ml_method_name(name):
return name[3:] # Remove Z3_
def is_out_param(p):
if param_kind(p) == OUT or param_kind(p) == INOUT or param_kind(p) == OUT_ARRAY or param_kind(p) == INOUT_ARRAY or param_kind(p) == OUT_MANAGED_ARRAY:
return True
else:
return False
def outparams(params):
op = []
for param in params:
if is_out_param(param):
op.append(param)
return op
def is_in_param(p):
if param_kind(p) == IN or param_kind(p) == INOUT or param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY:
return True
else:
return False
def inparams(params):
ip = []
for param in params:
if is_in_param(param):
ip.append(param)
return ip
def is_array_param(p):
if param_kind(p) == IN_ARRAY or param_kind(p) == INOUT_ARRAY or param_kind(p) == OUT_ARRAY:
return True
else:
return False
def arrayparams(params):
op = []
for param in params:
if is_array_param(param):
op.append(param)
return op
def ml_plus_type(ts):
if ts == 'Z3_context':
return 'Z3_context_plus'
elif ts == 'Z3_ast' or ts == 'Z3_sort' or ts == 'Z3_func_decl' or ts == 'Z3_app' or ts == 'Z3_pattern':
return 'Z3_ast_plus'
elif ts == 'Z3_symbol':
return 'Z3_symbol_plus'
elif ts == 'Z3_constructor':
return 'Z3_constructor_plus'
elif ts == 'Z3_constructor_list':
return 'Z3_constructor_list_plus'
elif ts == 'Z3_rcf_num':
return 'Z3_rcf_num_plus'
elif ts == 'Z3_params':
return 'Z3_params_plus'
elif ts == 'Z3_param_descrs':
return 'Z3_param_descrs_plus'
elif ts == 'Z3_model':
return 'Z3_model_plus'
elif ts == 'Z3_func_interp':
return 'Z3_func_interp_plus'
elif ts == 'Z3_func_entry':
return 'Z3_func_entry_plus'
elif ts == 'Z3_goal':
return 'Z3_goal_plus'
elif ts == 'Z3_tactic':
return 'Z3_tactic_plus'
elif ts == 'Z3_probe':
return 'Z3_probe_plus'
elif ts == 'Z3_apply_result':
return 'Z3_apply_result_plus'
elif ts == 'Z3_solver':
return 'Z3_solver_plus'
elif ts == 'Z3_stats':
return 'Z3_stats_plus'
elif ts == 'Z3_ast_vector':
return 'Z3_ast_vector_plus'
elif ts == 'Z3_ast_map':
return 'Z3_ast_map_plus'
elif ts == 'Z3_fixedpoint':
return 'Z3_fixedpoint_plus'
elif ts == 'Z3_optimize':
return 'Z3_optimize_plus'
else:
return ts
def ml_minus_type(ts):
if ts == 'Z3_ast' or ts == 'Z3_sort' or ts == 'Z3_func_decl' or ts == 'Z3_app' or ts == 'Z3_pattern':
return 'Z3_ast'
if ts == 'Z3_ast_plus' or ts == 'Z3_sort_plus' or ts == 'Z3_func_decl_plus' or ts == 'Z3_app_plus' or ts == 'Z3_pattern_plus':
return 'Z3_ast'
elif ts == 'Z3_constructor_plus':
return 'Z3_constructor'
elif ts == 'Z3_constructor_list_plus':
return 'Z3_constructor_list'
elif ts == 'Z3_rcf_num_plus':
return 'Z3_rcf_num'
elif ts == 'Z3_params_plus':
return 'Z3_params'
elif ts == 'Z3_param_descrs_plus':
return 'Z3_param_descrs'
elif ts == 'Z3_model_plus':
return 'Z3_model'
elif ts == 'Z3_func_interp_plus':
return 'Z3_func_interp'
elif ts == 'Z3_func_entry_plus':
return 'Z3_func_entry'
elif ts == 'Z3_goal_plus':
return 'Z3_goal'
elif ts == 'Z3_tactic_plus':
return 'Z3_tactic'
elif ts == 'Z3_probe_plus':
return 'Z3_probe'
elif ts == 'Z3_apply_result_plus':
return 'Z3_apply_result'
elif ts == 'Z3_solver_plus':
return 'Z3_solver'
elif ts == 'Z3_stats_plus':
return 'Z3_stats'
elif ts == 'Z3_ast_vector_plus':
return 'Z3_ast_vector'
elif ts == 'Z3_ast_map_plus':
return 'Z3_ast_map'
elif ts == 'Z3_fixedpoint_plus':
return 'Z3_fixedpoint'
elif ts == 'Z3_optimize_plus':
return 'Z3_optimize'
else:
return ts
def ml_plus_type_raw(ts):
if ml_has_plus_type(ts):
        return ml_plus_type(ts) + '_raw'
else:
return ts
def ml_plus_ops_type(ts):
if ml_has_plus_type(ts):
return ml_plus_type(ts) + '_custom_ops'
else:
return 'default_custom_ops'
def ml_has_plus_type(ts):
return ts != ml_plus_type(ts)
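# For example (derived from the mappings above): ml_plus_type('Z3_solver') is
# 'Z3_solver_plus', ml_minus_type('Z3_solver_plus') is 'Z3_solver', and
# ml_has_plus_type('unsigned') is False because plain C types map to themselves.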
def ml_unwrap(t, ts, s):
if t == STRING:
return '(' + ts + ') String_val(' + s + ')'
elif t == BOOL or (type2str(t) == 'bool'):
return '(' + ts + ') Bool_val(' + s + ')'
elif t == INT or t == PRINT_MODE or t == ERROR_CODE:
return '(' + ts + ') Int_val(' + s + ')'
elif t == UINT:
return '(' + ts + ') Unsigned_int_val(' + s + ')'
elif t == INT64:
return '(' + ts + ') Long_val(' + s + ')'
elif t == UINT64:
return '(' + ts + ') Unsigned_long_val(' + s + ')'
elif t == DOUBLE:
return '(' + ts + ') Double_val(' + s + ')'
elif ml_has_plus_type(ts):
pts = ml_plus_type(ts)
return '(' + ts + ') ' + ml_plus_type_raw(ts) + '((' + pts + '*) Data_custom_val(' + s + '))'
else:
return '* ((' + ts + '*) Data_custom_val(' + s + '))'
def ml_set_wrap(t, d, n):
if t == VOID:
return d + ' = Val_unit;'
elif t == BOOL or (type2str(t) == 'bool'):
return d + ' = Val_bool(' + n + ');'
elif t == INT or t == UINT or t == PRINT_MODE or t == ERROR_CODE:
return d + ' = Val_int(' + n + ');'
elif t == INT64 or t == UINT64:
return d + ' = Val_long(' + n + ');'
elif t == DOUBLE:
return d + '= caml_copy_double(' + n + ');'
elif t == STRING:
return d + ' = caml_copy_string((const char*) ' + n + ');'
else:
pts = ml_plus_type(type2str(t))
return '*(' + pts + '*)Data_custom_val(' + d + ') = ' + n + ';'
def ml_alloc_and_store(t, lhs, rhs):
if t == VOID or t == BOOL or t == INT or t == UINT or t == PRINT_MODE or t == ERROR_CODE or t == INT64 or t == UINT64 or t == DOUBLE or t == STRING or (type2str(t) == 'bool'):
return ml_set_wrap(t, lhs, rhs)
else:
pts = ml_plus_type(type2str(t))
pops = ml_plus_ops_type(type2str(t))
alloc_str = '%s = caml_alloc_custom(&%s, sizeof(%s), 0, 1); ' % (lhs, pops, pts)
return alloc_str + ml_set_wrap(t, lhs, rhs)
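# For example, ml_set_wrap(UINT, 'result', 'z3rv') produces "result = Val_int(z3rv);",
# while ml_alloc_and_store on a handle type such as Z3_solver first emits a
# caml_alloc_custom(...) call and then stores the value through Data_custom_val.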
def mk_ml(ml_src_dir, ml_output_dir):
global Type2Str
ml_nativef = os.path.join(ml_output_dir, 'z3native.ml')
ml_native = open(ml_nativef, 'w')
ml_native.write('(* Automatically generated file *)\n\n')
ml_pref = open(os.path.join(ml_src_dir, 'z3native.ml.pre'), 'r')
for s in ml_pref:
        ml_native.write(s)
ml_pref.close()
ml_native.write('\n')
for name, result, params in _dotnet_decls:
ml_native.write('external %s : ' % ml_method_name(name))
ip = inparams(params)
op = outparams(params)
if len(ip) == 0:
ml_native.write(' unit -> ')
for p in ip:
ml_native.write('%s -> ' % param2ml(p))
if len(op) > 0:
ml_native.write('(')
first = True
if result != VOID or len(op) == 0:
ml_native.write('%s' % type2ml(result))
first = False
for p in op:
if first:
first = False
else:
ml_native.write(' * ')
ml_native.write('%s' % param2ml(p))
if len(op) > 0:
ml_native.write(')')
if len(ip) > 5:
ml_native.write(' = "n_%s_bytecode" "n_%s"\n' % (ml_method_name(name), ml_method_name(name)))
else:
ml_native.write(' = "n_%s"\n' % ml_method_name(name))
ml_native.write('\n')
# null pointer helpers
for type_id in Type2Str:
type_name = Type2Str[type_id]
if ml_has_plus_type(type_name) and not type_name in ['Z3_context', 'Z3_sort', 'Z3_func_decl', 'Z3_app', 'Z3_pattern']:
ml_name = type2ml(type_id)
ml_native.write('external context_of_%s : %s -> context = "n_context_of_%s"\n' % (ml_name, ml_name, ml_name))
ml_native.write('external is_null_%s : %s -> bool = "n_is_null_%s"\n' % (ml_name, ml_name, ml_name))
ml_native.write('external mk_null_%s : context -> %s = "n_mk_null_%s"\n\n' % (ml_name, ml_name, ml_name))
ml_native.write('(**/**)\n')
ml_native.close()
if mk_util.is_verbose():
print ('Generated "%s"' % ml_nativef)
mk_z3native_stubs_c(ml_src_dir, ml_output_dir)
z3_long_funs = frozenset([
'Z3_solver_check',
'Z3_solver_check_assumptions',
'Z3_simplify',
'Z3_simplify_ex',
])
z3_ml_overrides = frozenset([
'Z3_mk_config'
])
def mk_z3native_stubs_c(ml_src_dir, ml_output_dir): # C interface
ml_wrapperf = os.path.join(ml_output_dir, 'z3native_stubs.c')
ml_wrapper = open(ml_wrapperf, 'w')
ml_wrapper.write('// Automatically generated file\n\n')
ml_pref = open(os.path.join(ml_src_dir, 'z3native_stubs.c.pre'), 'r')
for s in ml_pref:
        ml_wrapper.write(s)
ml_pref.close()
for name, result, params in _dotnet_decls:
if name in z3_ml_overrides:
continue
ip = inparams(params)
op = outparams(params)
ap = arrayparams(params)
ret_size = len(op)
if result != VOID:
ret_size = ret_size + 1
# Setup frame
ml_wrapper.write('CAMLprim DLL_PUBLIC value n_%s(' % ml_method_name(name))
first = True
i = 0
for p in params:
if is_in_param(p):
if first:
first = False
else:
ml_wrapper.write(', ')
ml_wrapper.write('value a%d' % i)
i = i + 1
ml_wrapper.write(') {\n')
ml_wrapper.write(' CAMLparam%d(' % len(ip))
i = 0
first = True
for p in params:
if is_in_param(p):
if first:
first = False
else:
ml_wrapper.write(', ')
ml_wrapper.write('a%d' % i)
i = i + 1
ml_wrapper.write(');\n')
i = 0
if len(op) + len(ap) == 0:
ml_wrapper.write(' CAMLlocal1(result);\n')
else:
c = 0
needs_tmp_value = False
for p in params:
if is_out_param(p) or is_array_param(p):
c = c + 1
needs_tmp_value = needs_tmp_value or param_kind(p) == OUT_ARRAY or param_kind(p) == INOUT_ARRAY
if needs_tmp_value:
c = c + 1
if len(ap) > 0:
c = c + 1
ml_wrapper.write(' CAMLlocal%s(result, z3rv_val' % (c+2))
for p in params:
if is_out_param(p) or is_array_param(p):
ml_wrapper.write(', _a%s_val' % i)
i = i + 1
if needs_tmp_value:
ml_wrapper.write(', tmp_val')
if len(ap) != 0:
                ml_wrapper.write(', _iter')
ml_wrapper.write(');\n')
if len(ap) > 0:
ml_wrapper.write(' unsigned _i;\n')
# determine if the function has a context as parameter.
have_context = (len(params) > 0) and (param_type(params[0]) == CONTEXT)
if have_context and name not in Unwrapped:
ml_wrapper.write(' Z3_error_code ec;\n')
if result != VOID:
ts = type2str(result)
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
ml_wrapper.write(' %s z3rv_m;\n' % ts)
ml_wrapper.write(' %s z3rv;\n' % pts)
else:
ml_wrapper.write(' %s z3rv;\n' % ts)
# declare all required local variables
# To comply with C89, we need to first declare the variables and initialize them
# only afterwards.
i = 0
for param in params:
if param_type(param) == CONTEXT and i == 0:
ml_wrapper.write(' Z3_context_plus ctx_p;\n')
ml_wrapper.write(' Z3_context _a0;\n')
else:
k = param_kind(param)
if k == OUT_ARRAY:
ml_wrapper.write(' %s * _a%s;\n' % (type2str(param_type(param)), i))
elif k == OUT_MANAGED_ARRAY:
ml_wrapper.write(' %s * _a%s;\n' % (type2str(param_type(param)), i))
elif k == IN_ARRAY or k == INOUT_ARRAY:
t = param_type(param)
ts = type2str(t)
ml_wrapper.write(' %s * _a%s;\n' % (ts, i))
elif k == IN:
t = param_type(param)
ml_wrapper.write(' %s _a%s;\n' % (type2str(t), i))
elif k == OUT or k == INOUT:
t = param_type(param)
ml_wrapper.write(' %s _a%s;\n' % (type2str(t), i))
ts = type2str(t)
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
ml_wrapper.write(' %s _a%dp;\n' % (pts, i))
i = i + 1
# End of variable declarations in outermost block:
# To comply with C89, no variable declarations may occur in the outermost block
# from that point onwards (breaks builds with at least VC 2012 and prior)
ml_wrapper.write('\n')
# Declare locals, preprocess arrays, strings, in/out arguments
i = 0
for param in params:
if param_type(param) == CONTEXT and i == 0:
ml_wrapper.write(' ctx_p = *(Z3_context_plus*) Data_custom_val(a' + str(i) + ');\n')
ml_wrapper.write(' _a0 = ctx_p->ctx;\n')
else:
k = param_kind(param)
if k == OUT_ARRAY:
ml_wrapper.write(' _a%s = (%s*) malloc(sizeof(%s) * (_a%s));\n' % (
i,
type2str(param_type(param)),
type2str(param_type(param)),
param_array_capacity_pos(param)))
elif k == OUT_MANAGED_ARRAY:
ml_wrapper.write(' _a%s = 0;\n' % i)
elif k == IN_ARRAY or k == INOUT_ARRAY:
t = param_type(param)
ts = type2str(t)
ml_wrapper.write(' _a%s = (%s*) malloc(sizeof(%s) * _a%s);\n' % (i, ts, ts, param_array_capacity_pos(param)))
elif k == IN:
t = param_type(param)
ml_wrapper.write(' _a%s = %s;\n' % (i, ml_unwrap(t, type2str(t), 'a' + str(i))))
i = i + 1
i = 0
for param in params:
k = param_kind(param)
if k == IN_ARRAY or k == INOUT_ARRAY:
t = param_type(param)
ts = type2str(t)
ml_wrapper.write(' _iter = a' + str(i) + ';\n')
ml_wrapper.write(' for (_i = 0; _i < _a%s; _i++) {\n' % param_array_capacity_pos(param))
ml_wrapper.write(' assert(_iter != Val_emptylist);\n')
ml_wrapper.write(' _a%s[_i] = %s;\n' % (i, ml_unwrap(t, ts, 'Field(_iter, 0)')))
ml_wrapper.write(' _iter = Field(_iter, 1);\n')
ml_wrapper.write(' }\n')
ml_wrapper.write(' assert(_iter == Val_emptylist);\n\n')
i = i + 1
release_caml_gc= name in z3_long_funs
if release_caml_gc:
ml_wrapper.write('\n caml_release_runtime_system();\n')
ml_wrapper.write('\n /* invoke Z3 function */\n ')
if result != VOID:
ts = type2str(result)
if ml_has_plus_type(ts):
ml_wrapper.write('z3rv_m = ')
else:
ml_wrapper.write('z3rv = ')
# invoke procedure
ml_wrapper.write('%s(' % name)
i = 0
first = True
for param in params:
if first:
first = False
else:
ml_wrapper.write(', ')
k = param_kind(param)
if k == OUT or k == INOUT or k == OUT_MANAGED_ARRAY:
ml_wrapper.write('&_a%s' % i)
else:
ml_wrapper.write('_a%i' % i)
i = i + 1
ml_wrapper.write(');\n')
if name in NULLWrapped:
ml_wrapper.write(' if (z3rv_m == NULL) {\n')
ml_wrapper.write(' caml_raise_with_string(*caml_named_value("Z3EXCEPTION"), "Object allocation failed");\n')
ml_wrapper.write(' }\n')
if release_caml_gc:
ml_wrapper.write('\n caml_acquire_runtime_system();\n')
if have_context and name not in Unwrapped:
ml_wrapper.write(' ec = Z3_get_error_code(ctx_p->ctx);\n')
ml_wrapper.write(' if (ec != Z3_OK) {\n')
ml_wrapper.write(' const char * msg = Z3_get_error_msg(ctx_p->ctx, ec);\n')
ml_wrapper.write(' caml_raise_with_string(*caml_named_value("Z3EXCEPTION"), msg);\n')
ml_wrapper.write(' }\n')
if result != VOID:
ts = type2str(result)
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
if name in NULLWrapped:
ml_wrapper.write(' z3rv = %s_mk(z3rv_m);\n' % pts)
else:
ml_wrapper.write(' z3rv = %s_mk(ctx_p, (%s) z3rv_m);\n' % (pts, ml_minus_type(ts)))
# convert output params
if len(op) > 0:
# we have output parameters (i.e. call-by-reference arguments to the Z3 native
# code function). Hence, the value returned by the OCaml native wrapper is a tuple
# which contains the Z3 native function's return value (if it is non-void) in its
# first and the output parameters in the following components.
ml_wrapper.write('\n /* construct return tuple */\n')
ml_wrapper.write(' result = caml_alloc(%s, 0);\n' % ret_size)
i = 0
for p in params:
pt = param_type(p)
ts = type2str(pt)
if param_kind(p) == OUT_ARRAY or param_kind(p) == INOUT_ARRAY:
# convert a C-array into an OCaml list and return it
ml_wrapper.write('\n _a%s_val = Val_emptylist;\n' % i)
ml_wrapper.write(' for (_i = _a%s; _i > 0; _i--) {\n' % param_array_capacity_pos(p))
pts = ml_plus_type(ts)
pops = ml_plus_ops_type(ts)
if ml_has_plus_type(ts):
ml_wrapper.write(' %s _a%dp = %s_mk(ctx_p, (%s) _a%d[_i - 1]);\n' % (pts, i, pts, ml_minus_type(ts), i))
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, 'tmp_val', '_a%dp' % i))
else:
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, 'tmp_val', '_a%d[_i - 1]' % i))
ml_wrapper.write(' _iter = caml_alloc(2,0);\n')
ml_wrapper.write(' Store_field(_iter, 0, tmp_val);\n')
ml_wrapper.write(' Store_field(_iter, 1, _a%s_val);\n' % i)
ml_wrapper.write(' _a%s_val = _iter;\n' % i)
ml_wrapper.write(' }\n\n')
elif param_kind(p) == OUT_MANAGED_ARRAY:
wrp = ml_set_wrap(pt, '_a%d_val' % i, '_a%d' % i)
wrp = wrp.replace('*)', '**)')
wrp = wrp.replace('_plus', '')
ml_wrapper.write(' %s\n' % wrp)
elif is_out_param(p):
if ml_has_plus_type(ts):
pts = ml_plus_type(ts)
ml_wrapper.write(' _a%dp = %s_mk(ctx_p, (%s) _a%d);\n' % (i, pts, ml_minus_type(ts), i))
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, '_a%d_val' % i, '_a%dp' % i))
else:
ml_wrapper.write(' %s\n' % ml_alloc_and_store(pt, '_a%d_val' % i, '_a%d' % i))
i = i + 1
# return tuples
i = j = 0
if result != VOID:
ml_wrapper.write(' %s' % ml_alloc_and_store(result, 'z3rv_val', 'z3rv'))
ml_wrapper.write(' Store_field(result, 0, z3rv_val);\n')
j = j + 1
for p in params:
if is_out_param(p):
ml_wrapper.write(' Store_field(result, %s, _a%s_val);\n' % (j, i))
j = j + 1
i = i + 1
else:
# As we have no output parameters, we simply return the result
ml_wrapper.write('\n /* construct simple return value */\n')
ml_wrapper.write(' %s' % ml_alloc_and_store(result, "result", "z3rv"))
# local array cleanup
ml_wrapper.write('\n /* cleanup and return */\n')
i = 0
for p in params:
k = param_kind(p)
if k == OUT_ARRAY or k == IN_ARRAY or k == INOUT_ARRAY:
ml_wrapper.write(' free(_a%s);\n' % i)
i = i + 1
# return
ml_wrapper.write(' CAMLreturn(result);\n')
ml_wrapper.write('}\n\n')
if len(ip) > 5:
ml_wrapper.write('CAMLprim DLL_PUBLIC value n_%s_bytecode(value * argv, int argn) {\n' % ml_method_name(name))
ml_wrapper.write(' return n_%s(' % ml_method_name(name))
i = 0
while i < len(ip):
if i == 0:
ml_wrapper.write('argv[0]')
else:
ml_wrapper.write(', argv[%s]' % i)
i = i + 1
ml_wrapper.write(');\n}\n')
ml_wrapper.write('\n\n')
ml_wrapper.write('#ifdef __cplusplus\n')
ml_wrapper.write('}\n')
ml_wrapper.write('#endif\n')
if mk_util.is_verbose():
print ('Generated "%s"' % ml_wrapperf)
# Collect def_API(...) and extra_API(...) commands from the given API header files
def def_APIs(api_files):
pat1 = re.compile(" *def_API.*")
pat2 = re.compile(" *extra_API.*")
for api_file in api_files:
api = open(api_file, 'r')
for line in api:
line = line.strip('\r\n\t ')
try:
m = pat1.match(line)
if m:
eval(line)
m = pat2.match(line)
if m:
eval(line)
except Exception:
                raise Exception("Failed to process API definition: %s" % line)
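# The matched lines live in the doc comments of the API headers and look roughly
# like this (illustrative; the headers contain the authoritative spellings):
#
#   def_API('Z3_mk_config', CONFIG, ())
#   def_API('Z3_some_query', INT, (_in(CONTEXT), _in(AST)))   # shape only, not a real entry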
def write_log_h_preamble(log_h):
log_h.write('// Automatically generated file\n')
log_h.write('#include\"api/z3.h\"\n')
log_h.write('#ifdef __GNUC__\n')
log_h.write('#define _Z3_UNUSED __attribute__((unused))\n')
log_h.write('#else\n')
log_h.write('#define _Z3_UNUSED\n')
log_h.write('#endif\n')
#
log_h.write('#include<iostream>\n')
log_h.write('extern std::ostream * g_z3_log;\n')
log_h.write('extern bool g_z3_log_enabled;\n')
log_h.write('class z3_log_ctx { bool m_prev; public: z3_log_ctx():m_prev(g_z3_log_enabled) { g_z3_log_enabled = false; } ~z3_log_ctx() { g_z3_log_enabled = m_prev; } bool enabled() const { return m_prev; } };\n')
log_h.write('inline void SetR(void * obj) { *g_z3_log << "= " << obj << "\\n"; }\ninline void SetO(void * obj, unsigned pos) { *g_z3_log << "* " << obj << " " << pos << "\\n"; } \ninline void SetAO(void * obj, unsigned pos, unsigned idx) { *g_z3_log << "@ " << obj << " " << pos << " " << idx << "\\n"; }\n')
log_h.write('#define RETURN_Z3(Z3RES) if (_LOG_CTX.enabled()) { SetR(Z3RES); } return Z3RES\n')
log_h.write('void _Z3_append_log(char const * msg);\n')
def write_log_c_preamble(log_c):
log_c.write('// Automatically generated file\n')
log_c.write('#include<iostream>\n')
log_c.write('#include\"api/z3.h\"\n')
log_c.write('#include\"api/api_log_macros.h\"\n')
log_c.write('#include\"api/z3_logger.h\"\n')
def write_exe_c_preamble(exe_c):
exe_c.write('// Automatically generated file\n')
exe_c.write('#include\"api/z3.h\"\n')
exe_c.write('#include\"api/z3_replayer.h\"\n')
#
exe_c.write('void Z3_replayer_error_handler(Z3_context ctx, Z3_error_code c) { printf("[REPLAYER ERROR HANDLER]: %s\\n", Z3_get_error_msg(ctx, c)); }\n')
def write_core_py_post(core_py):
core_py.write("""
# Clean up
del _lib
del _default_dirs
del _all_dirs
del _ext
""")
def write_core_py_preamble(core_py):
core_py.write(
"""
# Automatically generated file
import sys, os
import ctypes
import pkg_resources
from .z3types import *
from .z3consts import *
_ext = 'dll' if sys.platform in ('win32', 'cygwin') else 'dylib' if sys.platform == 'darwin' else 'so'
_lib = None
_default_dirs = ['.',
os.path.dirname(os.path.abspath(__file__)),
pkg_resources.resource_filename('z3', 'lib'),
os.path.join(sys.prefix, 'lib'),
None]
_all_dirs = []
if sys.version < '3':
import __builtin__
if hasattr(__builtin__, "Z3_LIB_DIRS"):
_all_dirs = __builtin__.Z3_LIB_DIRS
else:
import builtins
if hasattr(builtins, "Z3_LIB_DIRS"):
_all_dirs = builtins.Z3_LIB_DIRS
for v in ('Z3_LIBRARY_PATH', 'PATH', 'PYTHONPATH'):
if v in os.environ:
lp = os.environ[v];
lds = lp.split(';') if sys.platform in ('win32') else lp.split(':')
_all_dirs.extend(lds)
_all_dirs.extend(_default_dirs)
_failures = []
for d in _all_dirs:
try:
d = os.path.realpath(d)
if os.path.isdir(d):
d = os.path.join(d, 'libz3.%s' % _ext)
if os.path.isfile(d):
_lib = ctypes.CDLL(d)
break
except Exception as e:
_failures += [e]
pass
if _lib is None:
# If all else failed, ask the system to find it.
try:
_lib = ctypes.CDLL('libz3.%s' % _ext)
except Exception as e:
_failures += [e]
pass
if _lib is None:
print("Could not find libz3.%s; consider adding the directory containing it to" % _ext)
print(" - your system's PATH environment variable,")
print(" - the Z3_LIBRARY_PATH environment variable, or ")
print(" - to the custom Z3_LIBRARY_DIRS Python-builtin before importing the z3 module, e.g. via")
if sys.version < '3':
print(" import __builtin__")
print(" __builtin__.Z3_LIB_DIRS = [ '/path/to/libz3.%s' ] " % _ext)
else:
print(" import builtins")
print(" builtins.Z3_LIB_DIRS = [ '/path/to/libz3.%s' ] " % _ext)
raise Z3Exception("libz3.%s not found." % _ext)
def _to_ascii(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except:
# kick the bucket down the road. :-J
return s
else:
return s
if sys.version < '3':
def _to_pystr(s):
return s
else:
def _to_pystr(s):
if s != None:
enc = sys.stdout.encoding
if enc != None: return s.decode(enc)
else: return s.decode('ascii')
else:
return ""
_error_handler_type = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_uint)
_lib.Z3_set_error_handler.restype = None
_lib.Z3_set_error_handler.argtypes = [ContextObj, _error_handler_type]
"""
)
log_h = None
log_c = None
exe_c = None
core_py = None
# FIXME: This can only be called once from this module
# due to its use of global state!
def generate_files(api_files,
api_output_dir=None,
z3py_output_dir=None,
dotnet_output_dir=None,
java_output_dir=None,
java_package_name=None,
js_output_dir=None,
ml_output_dir=None,
ml_src_dir=None):
"""
Scan the api files in ``api_files`` and emit the relevant API files into
the output directories specified. If an output directory is set to ``None``
then the files for that language binding or module are not emitted.
The reason for this function interface is:
* The CMake build system needs to control where
files are emitted.
* The CMake build system needs to be able to choose
which API files are emitted.
* This function should be as decoupled from the Python
build system as much as possible but it must be possible
for the Python build system code to use this function.
Therefore we:
* Do not use the ``mk_util.is_*_enabled()`` functions
to determine if certain files should be or should not be emitted.
* Do not use the components declared in the Python build system
to determine the output directory paths.
"""
# FIXME: These should not be global
global log_h, log_c, exe_c, core_py
assert isinstance(api_files, list)
# Hack: Avoid emitting files when we don't want them
# by writing to temporary files that get deleted when
# closed. This allows us to work around the fact that
# existing code is designed to always emit these files.
def mk_file_or_temp(output_dir, file_name, mode='w'):
        if output_dir is not None:
assert os.path.exists(output_dir) and os.path.isdir(output_dir)
return open(os.path.join(output_dir, file_name), mode)
else:
# Return a file that we can write to without caring
print("Faking emission of '{}'".format(file_name))
import tempfile
return tempfile.TemporaryFile(mode=mode)
with mk_file_or_temp(api_output_dir, 'api_log_macros.h') as log_h:
with mk_file_or_temp(api_output_dir, 'api_log_macros.cpp') as log_c:
with mk_file_or_temp(api_output_dir, 'api_commands.cpp') as exe_c:
with mk_file_or_temp(z3py_output_dir, os.path.join('z3', 'z3core.py')) as core_py:
# Write preambles
write_log_h_preamble(log_h)
write_log_c_preamble(log_c)
write_exe_c_preamble(exe_c)
write_core_py_preamble(core_py)
# FIXME: these functions are awful
def_Types(api_files)
def_APIs(api_files)
mk_bindings(exe_c)
mk_py_wrappers()
write_core_py_post(core_py)
if mk_util.is_verbose():
print("Generated '{}'".format(log_h.name))
print("Generated '{}'".format(log_c.name))
print("Generated '{}'".format(exe_c.name))
print("Generated '{}'".format(core_py.name))
if dotnet_output_dir:
with open(os.path.join(dotnet_output_dir, 'Native.cs'), 'w') as dotnet_file:
mk_dotnet(dotnet_file)
mk_dotnet_wrappers(dotnet_file)
if mk_util.is_verbose():
print("Generated '{}'".format(dotnet_file.name))
if java_output_dir:
mk_java(java_output_dir, java_package_name)
if ml_output_dir:
assert not ml_src_dir is None
mk_ml(ml_src_dir, ml_output_dir)
if js_output_dir:
mk_js(js_output_dir)
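# Illustrative invocation (directory names are assumptions, not defaults): a build
# script could emit just the C API glue and the Python binding like this, leaving
# the other bindings disabled by passing None for their output directories:
#
#   generate_files(api_files=['z3_api.h'],
#                  api_output_dir='build/api',
#                  z3py_output_dir='build/python')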
def main(args):
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("api_files",
nargs="+",
help="API header files to generate files from")
parser.add_argument("--api_output_dir",
default=None,
help="Directory to emit files for api module. If not specified no files are emitted.")
parser.add_argument("--z3py-output-dir",
dest="z3py_output_dir",
default=None,
help="Directory to emit z3py files. If not specified no files are emitted.")
parser.add_argument("--dotnet-output-dir",
dest="dotnet_output_dir",
default=None,
help="Directory to emit dotnet files. If not specified no files are emitted.")
parser.add_argument("--java-output-dir",
dest="java_output_dir",
default=None,
help="Directory to emit Java files. If not specified no files are emitted.")
parser.add_argument("--java-package-name",
dest="java_package_name",
default=None,
help="Name to give the Java package (e.g. ``com.microsoft.z3``).")
parser.add_argument("--ml-src-dir",
dest="ml_src_dir",
default=None,
help="Directory containing OCaml source files. If not specified no files are emitted")
parser.add_argument("--ml-output-dir",
dest="ml_output_dir",
default=None,
help="Directory to emit OCaml files. If not specified no files are emitted.")
parser.add_argument("--js_output_dir",
dest="js_output_dir",
default=None,
help="Directory to emit js bindings. If not specified no files are emitted.")
pargs = parser.parse_args(args)
if pargs.java_output_dir:
        if pargs.java_package_name is None:
logging.error('--java-package-name must be specified')
return 1
if pargs.ml_output_dir:
if pargs.ml_src_dir is None:
logging.error('--ml-src-dir must be specified')
return 1
for api_file in pargs.api_files:
if not os.path.exists(api_file):
logging.error('"{}" does not exist'.format(api_file))
return 1
generate_files(api_files=pargs.api_files,
api_output_dir=pargs.api_output_dir,
z3py_output_dir=pargs.z3py_output_dir,
dotnet_output_dir=pargs.dotnet_output_dir,
java_output_dir=pargs.java_output_dir,
java_package_name=pargs.java_package_name,
js_output_dir=pargs.js_output_dir,
ml_output_dir=pargs.ml_output_dir,
ml_src_dir=pargs.ml_src_dir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 39.39869
| 310
| 0.535546
|
bf4ba273bad55c09bcd1cee04a5a32d8d558f4c3
| 661
|
py
|
Python
|
src/quicknlp/data/__init__.py
|
jalajthanaki/quick-nlp
|
861a54c9e30de076a2316cb6712d934de4058cc5
|
[
"MIT"
] | 287
|
2018-04-10T10:58:09.000Z
|
2022-03-22T02:05:40.000Z
|
src/quicknlp/data/__init__.py
|
scutcyr/quick-nlp
|
861a54c9e30de076a2316cb6712d934de4058cc5
|
[
"MIT"
] | 1
|
2018-07-03T17:10:03.000Z
|
2018-07-03T17:10:03.000Z
|
src/quicknlp/data/__init__.py
|
scutcyr/quick-nlp
|
861a54c9e30de076a2316cb6712d934de4058cc5
|
[
"MIT"
] | 51
|
2018-04-10T11:38:02.000Z
|
2021-10-17T06:23:43.000Z
|
from .data_loaders import DialogueDataLoader
from .datasets import DialogueDataset, HierarchicalDatasetFromDataFrame, HierarchicalDatasetFromFiles, \
TabularDatasetFromDataFrame, TabularDatasetFromFiles, DialDataset, HREDDataset, HREDConstraintsDataset
from .dialogue_analysis import DialogueAnalysis
from .dialogue_model_data_loader import CVAEModelData, HREDModelData, HREDAttentionModelData
from .hierarchical_model_data_loader import HierarchicalModelData
from .s2s_model_data_loader import S2SAttentionModelData, S2SModelData, TransformerModelData
from .sampler import DialogueRandomSampler, DialogueSampler
from .spacy_tokenizer import SpacyTokenizer
| 66.1
| 106
| 0.8941
|
efa4d54f271b33da954bc3325e62fd109630591f
| 9,745
|
py
|
Python
|
stdplugins/glink.py
|
aashiq075/PepeBot
|
5f40f4316c84ec3875bcbcd476e10448f9214f31
|
[
"Apache-2.0"
] | null | null | null |
stdplugins/glink.py
|
aashiq075/PepeBot
|
5f40f4316c84ec3875bcbcd476e10448f9214f31
|
[
"Apache-2.0"
] | null | null | null |
stdplugins/glink.py
|
aashiq075/PepeBot
|
5f40f4316c84ec3875bcbcd476e10448f9214f31
|
[
"Apache-2.0"
] | null | null | null |
"""Upload link to gDrive
Syntax:
.glink"""
import asyncio
import math
import os
import time
from pySmartDL import SmartDL
from telethon import events
from datetime import datetime
from googleapiclient.discovery import build
from apiclient.http import MediaFileUpload
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from mimetypes import guess_type
import httplib2
from uniborg.util import admin_cmd, progress, humanbytes
# Path to token json file, it should be in same directory as script
G_DRIVE_TOKEN_FILE = Config.TMP_DOWNLOAD_DIRECTORY + "/auth_token.txt"
# Copy your credentials from the APIs Console
CLIENT_ID = Config.G_DRIVE_CLIENT_ID
CLIENT_SECRET = Config.G_DRIVE_CLIENT_SECRET
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = "https://www.googleapis.com/auth/drive.file"
# Redirect URI for installed apps, can be left as is
REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
# global variable to set Folder ID to upload to
parent_id = Config.GDRIVE_FOLDER_ID
@borg.on(admin_cmd(pattern="glink ?(.*)", allow_sudo=True))
async def download(dryb):
""" For .gdrive command, upload files to google drive. """
if not dryb.text[0].isalpha() and dryb.text[0] not in ("/", "#", "@", "!"):
if dryb.fwd_from:
return
await dryb.edit("Processing ...")
input_str = dryb.pattern_match.group(1)
if CLIENT_ID is None or CLIENT_SECRET is None:
            return False
        if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
            os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
        required_file_name = None
        if input_str:
start = datetime.now()
url = input_str
file_name = os.path.basename(url)
if "|" in input_str:
url, file_name = input_str.split("|")
url = url.strip()
file_name = file_name.strip()
            downloaded_file_name = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, file_name)
downloader = SmartDL(url, downloaded_file_name, progress_bar=False)
downloader.start(blocking=False)
c_time = time.time()
display_message = None
while not downloader.isFinished():
status = downloader.get_status().capitalize()
total_length = downloader.filesize if downloader.filesize else None
downloaded = downloader.get_dl_size()
now = time.time()
diff = now - c_time
percentage = downloader.get_progress() * 100
downloader.get_speed()
round(diff) * 1000
progress_str = "[{0}{1}]\nProgress: {2}%".format(
''.join(["●" for i in range(math.floor(percentage / 5))]),
''.join(["○" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2))
estimated_total_time = downloader.get_eta(human=True)
try:
current_message = f"{status}...\nURL: {url}\nFile Name: {file_name}\n{progress_str}\n{humanbytes(downloaded)} of {humanbytes(total_length)}\nETA: {estimated_total_time}"
if current_message != display_message:
await dryb.edit(current_message)
display_message = current_message
await asyncio.sleep(3)
except Exception as e:
logger.info(str(e))
end = datetime.now()
ms = (end - start).seconds
if downloader.isSuccessful():
await dryb.edit(
"Downloaded to `{}` in {} seconds.\nNow Uploading to Google Drive...".format(
downloaded_file_name, ms)
)
required_file_name = downloaded_file_name
else:
await dryb.edit(
"Incorrect URL\n{}".format(url)
)
elif dryb.reply_to_msg_id:
start = datetime.now()
try:
c_time = time.time()
downloaded_file_name = await dryb.client.download_media(
await dryb.get_reply_message(),
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, dryb, c_time, "Downloading...")
)
)
except Exception as e: # pylint:disable=C0103,W0703
await dryb.edit(str(e))
else:
end = datetime.now()
required_file_name = downloaded_file_name
ms = (end - start).seconds
await dryb.edit(
"Downloaded to `{}` in {} seconds.\nNow Uploading to GDrive...".format(
downloaded_file_name, ms)
)
if required_file_name:
#
if Config.G_DRIVE_AUTH_TOKEN_DATA is not None:
with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
t_file.write(Config.G_DRIVE_AUTH_TOKEN_DATA)
# Check if token file exists, if not create it by requesting
# authorization code
if not os.path.isfile(G_DRIVE_TOKEN_FILE):
storage = await create_token_file(G_DRIVE_TOKEN_FILE, dryb)
http = authorize(G_DRIVE_TOKEN_FILE, storage)
# Authorize, get file parameters, upload file and print out result URL
# for download
http = authorize(G_DRIVE_TOKEN_FILE, None)
file_name, mime_type = file_ops(required_file_name)
# required_file_name will have the full path
# Sometimes API fails to retrieve starting URI, we wrap it.
try:
g_drive_link = await upload_file(http, required_file_name, file_name, mime_type, dryb)
await dryb.edit(f"File:`{required_file_name}`\nHas Successfully Uploaded to : [Google Drive]({g_drive_link})")
except Exception as e:
await dryb.edit(f"Error while uploading to Google Drive\nError Code:\n`{e}`")
# Get mime type and name of given file
def file_ops(file_path):
mime_type = guess_type(file_path)[0]
mime_type = mime_type if mime_type else "text/plain"
file_name = file_path.split("/")[-1]
return file_name, mime_type
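# For example, file_ops("/tmp/report.pdf") returns ("report.pdf", "application/pdf"),
# falling back to "text/plain" when the extension is not recognised.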
async def create_token_file(token_file, event):
# Run through the OAuth flow and retrieve credentials
flow = OAuth2WebServerFlow(
CLIENT_ID,
CLIENT_SECRET,
OAUTH_SCOPE,
redirect_uri=REDIRECT_URI
)
authorize_url = flow.step1_get_authorize_url()
async with event.client.conversation(Config.PRIVATE_GROUP_BOT_API_ID) as conv:
await conv.send_message(f"Go to the following link in your browser: {authorize_url} and reply the code")
response = conv.wait_event(events.NewMessage(
outgoing=True,
chats=Config.PRIVATE_GROUP_BOT_API_ID
))
response = await response
code = response.message.message.strip()
credentials = flow.step2_exchange(code)
storage = Storage(token_file)
storage.put(credentials)
return storage
def authorize(token_file, storage):
# Get credentials
if storage is None:
storage = Storage(token_file)
credentials = storage.get()
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
credentials.refresh(http)
http = credentials.authorize(http)
return http
async def upload_file(http, file_path, file_name, mime_type, event):
# Create Google Drive service instance
drive_service = build("drive", "v2", http=http, cache_discovery=False)
# File body description
media_body = MediaFileUpload(file_path, mimetype=mime_type, resumable=True)
body = {
"title": file_name,
"description": "Uploaded using PaperplaneExtended Userbot.",
"mimeType": mime_type,
}
if parent_id:
body["parents"] = [{"id": parent_id}]
# Permissions body description: anyone who has link can upload
# Other permissions can be found at
# https://developers.google.com/drive/v2/reference/permissions
permissions = {
"role": "reader",
"type": "anyone",
"value": None,
"withLink": True
}
# Insert a file
file = drive_service.files().insert(body=body, media_body=media_body)
response = None
while response is None:
status, response = file.next_chunk()
await asyncio.sleep(20)
if status:
percentage = int(status.progress() * 100)
progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
''.join(["●" for i in range(math.floor(percentage / 5))]),
''.join(["○" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2))
await event.edit(f"Uploading to Google Drive...\n\nFile Name: {file_name}\n{progress_str}")
if file:
await event.edit(file_name + " Uploaded Successfully")
# Insert new permissions
drive_service.permissions().insert(
fileId=response.get('id'),
body=permissions).execute()
# Define file instance and get url for download
file = drive_service.files().get(fileId=response.get('id')).execute()
download_url = response.get("webContentLink")
return download_url
@borg.on(admin_cmd(pattern="gfolder ?(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
folder_link = "https://drive.google.com/drive/u/2/folders/" + parent_id
await event.edit(f"Your current Google Drive Upload Directory : [Here]({folder_link})")
| 42.00431
| 189
| 0.621139
|
e0eeecc70172c3fea1d42098172b2cd28c96765f
| 162,091
|
py
|
Python
|
jira/client.py
|
shotgunsoftware/jira
|
8837db4c37a17614a903bbca3c9c477ad2c85ad6
|
[
"BSD-2-Clause"
] | 1
|
2022-02-02T16:39:26.000Z
|
2022-02-02T16:39:26.000Z
|
jira/client.py
|
shotgunsoftware/jira
|
8837db4c37a17614a903bbca3c9c477ad2c85ad6
|
[
"BSD-2-Clause"
] | 1
|
2019-10-21T13:05:50.000Z
|
2019-10-21T13:05:50.000Z
|
jira/client.py
|
shotgunsoftware/jira
|
8837db4c37a17614a903bbca3c9c477ad2c85ad6
|
[
"BSD-2-Clause"
] | 1
|
2021-10-03T14:47:44.000Z
|
2021-10-03T14:47:44.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from requests.auth import AuthBase
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira.readthedocs.io/en/latest/
"""
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
from functools import wraps
import imghdr
import mimetypes
import collections
import copy
import json
import logging
import os
import re
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
import calendar
import datetime
import hashlib
from numbers import Number
import requests
import sys
import time
import warnings
from requests.utils import get_netrc_auth
from six import iteritems
from six.moves.urllib.parse import urlparse
# GreenHopper specific resources
from jira.exceptions import JIRAError
from jira.resilientsession import raise_on_error
from jira.resilientsession import ResilientSession
# JIRA specific resources
from jira.resources import Attachment
from jira.resources import Board
from jira.resources import Comment
from jira.resources import Component
from jira.resources import Customer
from jira.resources import CustomFieldOption
from jira.resources import Dashboard
from jira.resources import Filter
from jira.resources import GreenHopperResource
from jira.resources import Issue
from jira.resources import IssueLink
from jira.resources import IssueLinkType
from jira.resources import IssueType
from jira.resources import Priority
from jira.resources import Project
from jira.resources import RemoteLink
from jira.resources import RequestType
from jira.resources import Resolution
from jira.resources import Resource
from jira.resources import Role
from jira.resources import SecurityLevel
from jira.resources import ServiceDesk
from jira.resources import Sprint
from jira.resources import Status
from jira.resources import StatusCategory
from jira.resources import User
from jira.resources import Group
from jira.resources import Version
from jira.resources import Votes
from jira.resources import Watchers
from jira.resources import Worklog
from jira import __version__
from jira.utils import CaseInsensitiveDict
from jira.utils import json_loads
from jira.utils import threaded_requests
from pkg_resources import parse_version
from collections import OrderedDict
from six import integer_types
from six import string_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
try:
# noinspection PyUnresolvedReferences
from requests_toolbelt import MultipartEncoder
except ImportError:
pass
try:
from requests_jwt import JWTAuth
except ImportError:
pass
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warning("Python default encoding is '%s' instead of 'UTF8' " \
# "which means that there is a big change of having problems. " \
# "Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
logging.getLogger('jira').addHandler(NullHandler())
def translate_resource_args(func):
"""Decorator that converts Issue and Project resources to their keys when used as arguments."""
@wraps(func)
def wrapper(*args, **kwargs):
"""
:type args: *Any
:type kwargs: **Any
:return: Any
"""
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
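# Illustrative effect of the decorator above: a decorated client method called with
# an Issue (or Project) resource receives the resource's .key string instead, so
# client.method(issue_obj, ...) behaves like client.method("PROJ-123", ...), where
# "PROJ-123" stands in for the issue's key.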
def _field_worker(fields=None,
**fieldargs
):
"""
:type fields: Optional[Dict[str, Any]]
:type fieldargs: **Any
:return: Union[Dict[str, Dict[str, Any]], Dict[str, Dict[str, str]]]
"""
if fields is not None:
return {'fields': fields}
return {'fields': fieldargs}
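# For example, _field_worker(summary="x") returns {'fields': {'summary': 'x'}},
# while an explicit dict passed as `fields` is wrapped unchanged under 'fields'.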
class ResultList(list):
def __init__(self, iterable=None, _startAt=0, _maxResults=0, _total=0, _isLast=None):
"""
:type iterable: Any
:type _startAt: int
:type _maxResults: int
:type _total: int
        :type _isLast: Optional[bool]
"""
if iterable is not None:
list.__init__(self, iterable)
else:
list.__init__(self)
self.startAt = _startAt
self.maxResults = _maxResults
# Optional parameters:
self.isLast = _isLast
self.total = _total
self.iterable = iterable or []
self.current = self.startAt
def __next__(self):
"""
:return: int
"""
self.current += 1
if self.current > self.total:
raise StopIteration
else:
return self.iterable[self.current - 1]
# Python 2 and 3 compat
next = __next__
class QshGenerator(object):
def __init__(self, context_path):
self.context_path = context_path
def __call__(self, req):
parse_result = urlparse(req.url)
path = parse_result.path[len(self.context_path):] if len(self.context_path) > 1 else parse_result.path
# Per Atlassian docs, use %20 for whitespace when generating qsh for URL
# https://developer.atlassian.com/cloud/jira/platform/understanding-jwt/#qsh
query = '&'.join(sorted(parse_result.query.split("&"))).replace('+', '%20')
qsh = '%(method)s&%(path)s&%(query)s' % {'method': req.method.upper(), 'path': path, 'query': query}
return hashlib.sha256(qsh.encode('utf-8')).hexdigest()
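# Illustrative sketch (URL is an assumption): a GET request to
# https://example.atlassian.net/rest/api/2/search?jql=order+by+created with a
# context path of '/' hashes the canonical string
# "GET&/rest/api/2/search&jql=order%20by%20created".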
class JiraCookieAuth(AuthBase):
"""Jira Cookie Authentication
Allows using cookie authentication as described by
https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis/jira-rest-api-tutorials/jira-rest-api-example-cookie-based-authentication
"""
def __init__(self, session, _get_session, auth):
self._session = session
self._get_session = _get_session
self.__auth = auth
def handle_401(self, response, **kwargs):
if response.status_code != 401:
return response
self.init_session()
response = self.process_original_request(response.request.copy())
return response
def process_original_request(self, original_request):
self.update_cookies(original_request)
return self.send_request(original_request)
def update_cookies(self, original_request):
# Cookie header needs first to be deleted for the header to be updated using
# the prepare_cookies method. See request.PrepareRequest.prepare_cookies
if 'Cookie' in original_request.headers:
del original_request.headers['Cookie']
original_request.prepare_cookies(self.cookies)
def init_session(self):
self.start_session()
def __call__(self, request):
request.register_hook('response', self.handle_401)
return request
def send_request(self, request):
return self._session.send(request)
@property
def cookies(self):
return self._session.cookies
def start_session(self):
self._get_session(self.__auth)
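# Illustrative wiring (see _create_cookie_auth below; credentials are placeholders):
# constructing JIRA(..., auth=("user", "password")) installs this class on the session,
# so a 401 response triggers a fresh login via the session auth endpoint and the
# original request is retried with the refreshed cookies.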
class JIRA(object):
"""User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
Without any arguments, this client will connect anonymously to the JIRA instance
started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
The easiest way to instantiate is using ``j = JIRA("https://jira.atlassian.com")``
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* agile_rest_path - the REST path to use for JIRA Agile requests. Defaults to ``greenhopper`` (old, private
API). Check `GreenHopperResource` for other supported values.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
* check_update -- Check whether using the newest python-jira library version.
* cookies -- A dict of custom cookies that are sent in all requests to the server.
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
:param kerberos: If true it will enable Kerberos authentication.
:param kerberos_options: A dict of properties for Kerberos authentication. The following properties are possible:
* mutual_authentication -- string DISABLED or OPTIONAL.
Example kerberos_options structure: ``{'mutual_authentication': 'DISABLED'}``
:param jwt: A dict of properties for JWT authentication supported by Atlassian Connect. The following
properties are required:
* secret -- shared secret as delivered during 'installed' lifecycle event
(see https://developer.atlassian.com/static/connect/docs/latest/modules/lifecycle.html for details)
* payload -- dict of fields to be inserted in the JWT payload, e.g. 'iss'
Example jwt structure: ``{'secret': SHARED_SECRET, 'payload': {'iss': PLUGIN_KEY}}``
:param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
as anonymous it will fail to instantiate.
:param get_server_info: If true it will fetch server version info first to determine if some API calls
are available.
:param async_: To enable asynchronous requests for those actions where we implemented it, like issue update() or delete().
:param async_workers: Set the number of worker threads for async operations.
:param timeout: Set a read/connect timeout for the underlying calls to JIRA (default: None)
Obviously this means that you cannot rely on the return code when this is enabled.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"auth_url": '/rest/auth/1/session',
"context_path": "/",
"rest_path": "api",
"rest_api_version": "2",
"agile_rest_path": GreenHopperResource.GREENHOPPER_REST_PATH,
"agile_rest_api_version": "1.0",
"verify": True,
"resilient": True,
"async": False,
"async_workers": 5,
"client_cert": None,
"check_update": False,
# amount of seconds to wait for loading a resource after updating it
# used to avoid server side caching issues, used to be 4 seconds.
"delay_reload": 0,
"headers": {
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
# 'Pragma': 'no-cache',
# 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
'X-Atlassian-Token': 'no-check'}}
checked_version = False
# TODO(ssbarnea): remove these two variables and use the ones defined in resources
JIRA_BASE_URL = Resource.JIRA_BASE_URL
AGILE_BASE_URL = GreenHopperResource.AGILE_BASE_URL
def __init__(self,
server=None,
options=None,
basic_auth=None,
oauth=None,
jwt=None,
kerberos=False,
kerberos_options=None,
validate=False,
get_server_info=True,
async_=False,
async_workers=5,
logging=True,
max_retries=3,
proxies=None,
timeout=None,
auth=None,
):
"""Construct a JIRA client instance.
Without any arguments, this client will connect anonymously to the JIRA instance
started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
        The easiest way to instantiate is using j = JIRA("https://jira.atlassian.com")
:param server: The server address and context path to use. Defaults to ``http://localhost:2990/jira``.
:type server: Optional[str]
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* agile_rest_path - the REST path to use for JIRA Agile requests. Defaults to ``greenhopper`` (old, private
API). Check `GreenHopperResource` for other supported values.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
* check_update -- Check whether using the newest python-jira library version.
:type options: Optional[Dict[str, Any]]
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:type basic_auth: Union[Dict, None, Tuple[str, str]]
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
:type oauth: Optional[Any]
:param kerberos: If true it will enable Kerberos authentication.
:type kerberos: bool
:param kerberos_options: A dict of properties for Kerberos authentication. The following properties are possible:
* mutual_authentication -- string DISABLED or OPTIONAL.
Example kerberos_options structure: ``{'mutual_authentication': 'DISABLED'}``
:type kerberos_options: Optional[Dict[str,str]]
:param jwt: A dict of properties for JWT authentication supported by Atlassian Connect. The following
properties are required:
* secret -- shared secret as delivered during 'installed' lifecycle event
(see https://developer.atlassian.com/static/connect/docs/latest/modules/lifecycle.html for details)
* payload -- dict of fields to be inserted in the JWT payload, e.g. 'iss'
Example jwt structure: ``{'secret': SHARED_SECRET, 'payload': {'iss': PLUGIN_KEY}}``
:type jwt: Optional[Any]
:param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
as anonymous it will fail to instantiate.
:type validate: bool
:param get_server_info: If true it will fetch server version info first to determine if some API calls
are available.
:type get_server_info: bool
:param async_: To enable async requests for those actions where we implemented it, like issue update() or delete().
:type async_: bool
:param async_workers: Set the number of worker threads for async operations.
:type async_workers: int
:param timeout: Set a read/connect timeout for the underlying calls to JIRA (default: None)
:type timeout: Optional[Any]
Obviously this means that you cannot rely on the return code when this is enabled.
:param max_retries: Sets the amount Retries for the HTTP sessions initiated by the client. (Default: 3)
:type max_retries: int
:param proxies: Sets the proxies for the HTTP session.
:type proxies: Optional[Any]
:param auth: Set a cookie auth token if this is required.
:type auth: Optional[Tuple[str,str]]
:param logging: Determine whether or not logging should be enabled. (Default: True)
:type logging: bool
"""
# force a copy of the tuple to be used in __del__() because
# sys.version_info could have already been deleted in __del__()
self.sys_version_info = tuple([i for i in sys.version_info])
if options is None:
options = {}
if server and hasattr(server, 'keys'):
warnings.warn(
"Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
DeprecationWarning)
options = server
server = None
if server:
options['server'] = server
if async_:
options['async'] = async_
options['async_workers'] = async_workers
self.logging = logging
self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
self._options.update(options)
self._rank = None
# Rip off trailing slash since all urls depend on that
if self._options['server'].endswith('/'):
self._options['server'] = self._options['server'][:-1]
context_path = urlparse(self._options['server']).path
if len(context_path) > 0:
self._options['context_path'] = context_path
self._try_magic()
if oauth:
self._create_oauth_session(oauth, timeout)
elif basic_auth:
self._create_http_basic_session(*basic_auth, timeout=timeout)
self._session.headers.update(self._options['headers'])
elif jwt:
self._create_jwt_session(jwt, timeout)
elif kerberos:
self._create_kerberos_session(timeout, kerberos_options=kerberos_options)
elif auth:
self._create_cookie_auth(auth, timeout)
validate = True # always log in for cookie based auth, as we need a first request to be logged in
else:
verify = self._options['verify']
self._session = ResilientSession(timeout=timeout)
self._session.verify = verify
self._session.headers.update(self._options['headers'])
if 'cookies' in self._options:
self._session.cookies.update(self._options['cookies'])
self._session.max_retries = max_retries
if proxies:
self._session.proxies = proxies
if validate:
# This will raise an Exception if you are not allowed to login.
# It's better to fail faster than later.
user = self.session(auth)
if user.raw is None:
auth_method = (
oauth or basic_auth or jwt or kerberos or auth or "anonymous"
)
raise JIRAError("Can not log in with %s" % str(auth_method))
self.deploymentType = None
if get_server_info:
# We need version in order to know what API calls are available or not
si = self.server_info()
try:
self._version = tuple(si['versionNumbers'])
except Exception as e:
logging.error("invalid server_info: %s", si)
raise e
self.deploymentType = si.get('deploymentType')
else:
self._version = (0, 0, 0)
if self._options['check_update'] and not JIRA.checked_version:
self._check_update_()
JIRA.checked_version = True
self._fields = {}
for f in self.fields():
if 'clauseNames' in f:
for name in f['clauseNames']:
self._fields[name] = f['id']
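# A minimal connection sketch for the constructor above, assuming a reachable
# self-hosted JIRA at https://jira.example.com and basic-auth credentials;
# every literal below is a placeholder.
def _example_connect():
    from jira import JIRA
    jira = JIRA(
        server='https://jira.example.com',
        basic_auth=('jira-user', 'password-or-api-token'),
        options={'verify': True},
        max_retries=3,
        timeout=10,
    )
    return jira.client_info()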
def _create_cookie_auth(self, auth, timeout):
self._session = ResilientSession(timeout=timeout)
self._session.auth = JiraCookieAuth(self._session, self.session, auth)
self._session.verify = self._options['verify']
self._session.cert = self._options['client_cert']
def _check_update_(self):
"""Check if the current version of the library is outdated."""
try:
data = requests.get("https://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if parse_version(released_version) > parse_version(__version__):
warnings.warn(
"You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
"""Destructor for JIRA instance."""
self.close()
def close(self):
session = getattr(self, "_session", None)
if session is not None:
try:
session.close()
except TypeError:
# TypeError: "'NoneType' object is not callable"
# Could still happen here because other references are also
# in the process to be torn down, see warning section in
# https://docs.python.org/2/reference/datamodel.html#object.__del__
pass
self._session = None
def _check_for_html_error(self, content):
# JIRA has the bad habit of returning errors in pages with 200 and
# embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
def _get_sprint_field_id(self):
sprint_field_name = "Sprint"
sprint_field_id = [f['schema']['customId'] for f in self.fields()
if f['name'] == sprint_field_name][0]
return sprint_field_id
def _fetch_pages(self,
item_type,
items_key,
request_path,
startAt=0,
maxResults=50,
params=None,
base=JIRA_BASE_URL,
):
"""Fetch pages.
:param item_type: Type of single item. ResultList of such items will be returned.
:type item_type: type
:param items_key: Path to the items in JSON returned from server.
Set it to None, if response is an array, and not a JSON object.
:type items_key: Optional[str]
:param request_path: path in request URL
:type request_path: str
:param startAt: index of the first record to be fetched. (Default: 0)
:type startAt: int
:param maxResults: Maximum number of items to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 50)
:type maxResults: int
:param params: Params to be used in all requests. Should not contain startAt and maxResults,
as they will be added for each request created from this function.
:type params: Dict[str, Any]
:param base: base URL
:type base: str
:rtype: ResultList
"""
async_class = None
if self._options['async']:
try:
from requests_futures.sessions import FuturesSession
async_class = FuturesSession
except ImportError:
pass
async_workers = self._options['async_workers']
page_params = params.copy() if params else {}
if startAt:
page_params['startAt'] = startAt
if maxResults:
page_params['maxResults'] = maxResults
resource = self._get_json(request_path, params=page_params, base=base)
next_items_page = self._get_items_from_page(item_type, items_key,
resource)
items = next_items_page
if True: # isinstance(resource, dict):
if isinstance(resource, dict):
total = resource.get('total')
# 'isLast' is the optional key added to responses in JIRA Agile 6.7.6. So far not used in basic JIRA API.
is_last = resource.get('isLast', False)
start_at_from_response = resource.get('startAt', 0)
max_results_from_response = resource.get('maxResults', 1)
else:
# if is a list
total = 1
is_last = True
start_at_from_response = 0
max_results_from_response = 1
# If maxResults evaluates as False, get all items in batches
if not maxResults:
page_size = max_results_from_response or len(items)
page_start = (startAt or start_at_from_response or 0) + page_size
if async_class is not None and not is_last and (
total is not None and len(items) < total):
async_fetches = []
future_session = async_class(session=self._session, max_workers=async_workers)
for start_index in range(page_start, total, page_size):
page_params = params.copy()
page_params['startAt'] = start_index
page_params['maxResults'] = page_size
url = self._get_url(request_path)
r = future_session.get(url, params=page_params)
async_fetches.append(r)
for future in async_fetches:
response = future.result()
resource = json_loads(response)
if resource:
next_items_page = self._get_items_from_page(
item_type, items_key, resource)
items.extend(next_items_page)
while async_class is None and not is_last and (
total is None or page_start < total) and len(
next_items_page) == page_size:
page_params['startAt'] = page_start
page_params['maxResults'] = page_size
resource = self._get_json(request_path, params=page_params, base=base)
if resource:
next_items_page = self._get_items_from_page(
item_type, items_key, resource)
items.extend(next_items_page)
page_start += page_size
else:
# if resource is an empty dictionary we assume no-results
break
return ResultList(items, start_at_from_response, max_results_from_response, total, is_last)
else:
# it seems that search_users can return a list() containing a single user!
return ResultList([item_type(self._options, self._session, resource)], 0, 1, 1, True)
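# A usage sketch for _fetch_pages(), mirroring how dashboards() below delegates
# to it; `jira` is assumed to be an authenticated client, and Dashboard is the
# resource class from jira.resources.
def _example_fetch_all_dashboards(jira):
    from jira.resources import Dashboard
    # maxResults=False makes _fetch_pages collect every page in batches
    return jira._fetch_pages(Dashboard, 'dashboards', 'dashboard', maxResults=False)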
def _get_items_from_page(self,
item_type,
items_key,
resource,
):
"""
:type item_type: type
:type items_key: str
:type resource: Dict[str, Any]
:rtype: Union[List[Dashboard], List[Issue]]
"""
try:
return [item_type(self._options, self._session, raw_issue_json) for raw_issue_json in
(resource[items_key] if items_key else resource)]
except KeyError as e:
# improving the error text so we know why it happened
raise KeyError(str(e) + " : " + json.dumps(resource))
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""Find Resource object for any addressable resource on the server.
This method is a universal resource locator for any REST-ful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:type resource_format: str
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
:rtype: Resource
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
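# A usage sketch for find() above; `jira` is assumed to be an authenticated
# client and PROJ-123 an existing issue key. The watchers endpoint is used
# purely to illustrate the path-template form.
def _example_find_watchers(jira):
    watchers = jira.find('issue/{0}/watchers', ids=('PROJ-123',))
    return watchers.raw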
def async_do(self, size=10):
"""Execute all asynchronous jobs and wait for them to finish. By default it will run on 10 threads.
:param size: number of threads to run on.
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing asynchronous %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""Return the mutable server application properties.
:param key: the single property to return a value for
:type key: Optional[str]
:rtype: Union[Dict[str, str], List[Dict[str, str]]]
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""Set the application property.
:param key: key of the property to set
:type key: str
:param value: value to assign to the property
:type value: str
"""
url = self._options['server'] + \
'/rest/api/latest/application-properties/' + key
payload = {
'id': key,
'value': value}
return self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""List of application links.
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
# url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID.
:param id: The Attachment ID
:type id: str
:rtype: Attachment
"""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata.
:rtype: Dict[str, int]
"""
return self._get_json('attachment/meta')
@translate_resource_args
def add_attachment(self, issue, attachment, filename=None):
"""Attach an attachment to an issue and returns a Resource for it.
The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
:param issue: the issue to attach the attachment to
:type issue: str
:param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
:type attachment: BufferedReader
:param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
is used. If you acquired the file-like object by any other method than ``open()``, make sure
that a name is specified in one way or the other.
:type filename: str
:rtype: Attachment
"""
if isinstance(attachment, string_types):
attachment = open(attachment, "rb")
if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
logging.warning(
"%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
url = self._get_url('issue/' + str(issue) + '/attachments')
fname = filename
if not fname:
fname = os.path.basename(attachment.name)
if 'MultipartEncoder' not in globals():
method = 'old'
r = self._session.post(
url,
files={
'file': (fname, attachment, 'application/octet-stream')},
headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
else:
method = 'MultipartEncoder'
def file_stream():
"""Returns files stream of attachment.
:rtype: MultipartEncoder
"""
return MultipartEncoder(
fields={
'file': (fname, attachment, 'application/octet-stream')})
m = file_stream()
r = self._session.post(
url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
js = json_loads(r)
if not js or not isinstance(js, collections.Iterable):
raise JIRAError("Unable to parse JSON: %s" % js)
attachment = Attachment(self._options, self._session, js[0])
if attachment.size == 0:
raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
return attachment
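# A usage sketch for add_attachment() above; `jira` is assumed to be an
# authenticated client, PROJ-123 an existing issue and report.pdf a local file.
def _example_attach_file(jira):
    with open('report.pdf', 'rb') as fh:
        attachment = jira.add_attachment(issue='PROJ-123', attachment=fh,
                                         filename='report.pdf')
    return attachment.id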
def delete_attachment(self, id):
"""Delete attachment by id.
:param id: ID of the attachment to delete
:type id: str
"""
url = self._get_url('attachment/' + str(id))
return self._session.delete(url)
# Components
def component(self, id):
"""Get a component Resource from the server.
:param id: ID of the component to get
:type id: str
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self,
name,
project,
description=None,
leadUserName=None,
assigneeType=None,
isAssigneeTypeValid=False,
):
"""Create a component inside a project and return a Resource for it.
:param name: name of the component
:type name: str
:param project: key of the project to create the component in
:type project: str
:param description: a description of the component
:type description: str
:param leadUserName: the username of the user responsible for this component
:type leadUserName: Optional[str]
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:type assigneeType: Optional[str]
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable (Default: False)
:type isAssigneeTypeValid: bool
:rtype: Component
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
def component_count_related_issues(self, id):
"""Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
def delete_component(self, id):
"""Delete component by id.
:param id: ID of the component to use
:type id: str
:rtype: Response
"""
url = self._get_url('component/' + str(id))
return self._session.delete(url)
# Custom field options
def custom_field_option(self, id):
"""Get a custom field option Resource from the server.
:param id: ID of the custom field to use
:type id: str
:rtype: CustomFieldOption
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:type filter: Optional[str]
:param startAt: index of the first dashboard to return (Default: 0)
:type startAt: int
:param maxResults: maximum number of dashboards to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 20)
:type maxResults: int
:rtype: ResultList
"""
params = {}
if filter is not None:
params['filter'] = filter
return self._fetch_pages(Dashboard, 'dashboards', 'dashboard', startAt, maxResults, params)
def dashboard(self, id):
"""Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
:type id: str
:rtype: Dashboard
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields.
:rtype: List[Dict[str, Any]]
"""
return self._get_json('field')
# Filters
def filter(self, id):
"""Get a filter Resource from the server.
:param id: ID of the filter to get.
:type id: str
:rtype: Filter
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user.
:rtype: List[Filter]
"""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""Create a new filter and return a filter Resource for it.
:param name: name of the new filter
:type name: str
:param description: useful human readable description of the new filter
:type description: str
:param jql: query string that defines the filter
:type jql: str
:param favourite: whether to add this filter to the current user's favorites
:type favourite: bool
:rtype: Filter
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
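# A usage sketch for create_filter() above, assuming `jira` is an
# authenticated client; the JQL string is only an example.
def _example_create_filter(jira):
    return jira.create_filter(
        name='My open bugs',
        description='Unresolved bugs assigned to me',
        jql='assignee = currentUser() AND resolution = Unresolved ORDER BY priority DESC',
        favourite=True,
    )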
def update_filter(self, filter_id,
name=None, description=None,
jql=None, favourite=None):
"""Update a filter and return a filter Resource for it.
:param name: name of the new filter
:type name: Optional[str]
:param description: useful human readable description of the new filter
:type description: Optional[str]
:param jql: query string that defines the filter
:type jql: Optional[str]
:param favourite: whether to add this filter to the current user's favorites
:type favourite: Optional[bool]
"""
filter = self.filter(filter_id)
data = {}
data['name'] = name or filter.name
data['description'] = description or filter.description
data['jql'] = jql or filter.jql
data['favourite'] = favourite or filter.favourite
url = self._get_url('filter/%s' % filter_id)
r = self._session.put(url, headers={'content-type': 'application/json'},
data=json.dumps(data))
raw_filter_json = json.loads(r.text)
return Filter(self._options, self._session, raw=raw_filter_json)
# Groups
def group(self, id, expand=None):
"""Get a group Resource from the server.
:param id: ID of the group to get
:type id: str
:param expand: Extra information to fetch inside each resource
:type expand: Optional[Any]
:rtype: Group
"""
group = Group(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
group.find(id, params=params)
return group
# non-resource
def groups(self, query=None, exclude=None, maxResults=9999):
"""Return a list of groups matching the specified criteria.
:param query: filter groups by name with this string
:type query: Optional[str]
:param exclude: filter out groups by name with this string
:type exclude: Optional[Any]
:param maxResults: maximum results to return. (Default: 9999)
:type maxResults: int
:rtype: List[str]
"""
params = {}
groups = []
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
for group in self._get_json('groups/picker', params=params)['groups']:
groups.append(group['name'])
return sorted(groups)
def group_members(self, group):
"""Return a hash or users with their information. Requires JIRA 6.0 or will raise NotImplemented.
:param group: Name of the group.
:type group: str
"""
if self._version < (6, 0, 0):
raise NotImplementedError(
"Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
params = {'groupname': group, 'expand': "users"}
r = self._get_json('group', params=params)
size = r['users']['size']
end_index = r['users']['end-index']
while end_index < size - 1:
params = {'groupname': group, 'expand': "users[%s:%s]" % (
end_index + 1, end_index + 50)}
r2 = self._get_json('group', params=params)
for user in r2['users']['items']:
r['users']['items'].append(user)
end_index = r2['users']['end-index']
size = r['users']['size']
result = {}
for user in r['users']['items']:
result[user['key']] = {'name': user['name'],
'fullname': user['displayName'],
'email': user.get('emailAddress', 'hidden'),
'active': user['active']}
return OrderedDict(sorted(result.items(), key=lambda t: t[0]))
def add_group(self, groupname):
"""Create a new group in JIRA.
:param groupname: The name of the group you wish to create.
:type groupname: str
:return: Boolean - True if successful.
:rtype: bool
"""
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
"""Delete a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:type groupname: str
:return: Boolean. Returns True on success.
:rtype: bool
"""
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""Get an issue Resource from the server.
:param id: ID or key of the issue to get
:type id: Union[Issue, str]
:param fields: comma-separated string of issue fields to include in the results
:type fields: Optional[str]
:param expand: extra information to fetch inside each resource
:type expand: Optional[str]
:rtype: Issue
"""
# this allows us to pass Issue objects to issue()
if isinstance(id, Issue):
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments
will be ignored
:type fields: Optional[Dict[str, Any]]
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value
returned from this method
:type prefetch: bool
:rtype: Issue
"""
data = _field_worker(fields, **fieldargs)
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
p = data['fields']['issuetype']
if isinstance(p, integer_types):
data['fields']['issuetype'] = {'id': p}
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['issuetype'] = {'id': self.issue_type_by_name(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, response=r, url=url, text=json.dumps(data))
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
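# A usage sketch for create_issue() above; `jira` is assumed to be an
# authenticated client, and the project key and issue type are placeholders.
def _example_create_issue(jira):
    fields = {
        'project': {'key': 'PROJ'},            # or simply 'PROJ'
        'summary': 'Payment page returns HTTP 500',
        'description': 'Steps to reproduce: ...',
        'issuetype': {'name': 'Bug'},          # or simply 'Bug'
    }
    return jira.create_issue(fields=fields, prefetch=True)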
def create_issues(self, field_list, prefetch=True):
"""Bulk create new issues and return an issue Resource for each successfully created issue.
See `create_issue` documentation for field information.
:param field_list: a list of dicts each containing field names and the values to use. Each dict
is an individual issue to create and is subject to its minimum requirements.
:type field_list: List[Dict[str, Any]]
:param prefetch: whether to reload the created issue Resource for each created issue so that all
of its data is present in the value returned from this method.
:type prefetch: bool
:rtype: List[Dict[str, Any]]
"""
data = {'issueUpdates': []}
for field_dict in field_list:
issue_data = _field_worker(field_dict)
p = issue_data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
issue_data['fields']['project'] = {'id': self.project(p).id}
p = issue_data['fields']['issuetype']
if isinstance(p, integer_types):
issue_data['fields']['issuetype'] = {'id': p}
if isinstance(p, string_types) or isinstance(p, integer_types):
issue_data['fields']['issuetype'] = {'id': self.issue_type_by_name(p).id}
data['issueUpdates'].append(issue_data)
url = self._get_url('issue/bulk')
try:
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
# Catching case where none of the issues has been created. See https://github.com/pycontribs/jira/issues/350
except JIRAError as je:
if je.status_code == 400:
raw_issue_json = json.loads(je.response.text)
else:
raise
issue_list = []
errors = {}
for error in raw_issue_json['errors']:
errors[error['failedElementNumber']] = error['elementErrors']['errors']
for index, fields in enumerate(field_list):
if index in errors:
issue_list.append({'status': 'Error', 'error': errors[index],
'issue': None, 'input_fields': fields})
else:
issue = raw_issue_json['issues'].pop(0)
if prefetch:
issue = self.issue(issue['key'])
else:
issue = Issue(self._options, self._session, raw=issue)
issue_list.append({'status': 'Success', 'issue': issue,
'error': None, 'input_fields': fields})
return issue_list
def supports_service_desk(self):
"""Returns whether or not the JIRA instance supports service desk.
:rtype: bool
"""
url = self._options['server'] + '/rest/servicedeskapi/info'
headers = {'X-ExperimentalApi': 'opt-in'}
try:
r = self._session.get(url, headers=headers)
return r.status_code == 200
except JIRAError:
return False
def create_customer(self, email, displayName):
"""Create a new customer and return an issue Resource for it.
:param email: Customer Email
:type email: str
:param displayName: Customer display name
:type displayName: str
:rtype: Customer
"""
url = self._options['server'] + '/rest/servicedeskapi/customer'
headers = {'X-ExperimentalApi': 'opt-in'}
r = self._session.post(url, headers=headers, data=json.dumps({
'email': email,
'displayName': displayName
}))
raw_customer_json = json_loads(r)
if r.status_code != 201:
raise JIRAError(r.status_code, request=r)
return Customer(self._options, self._session, raw=raw_customer_json)
def service_desks(self):
"""Get a list of ServiceDesk Resources from the server visible to the current authenticated user.
:rtype: List[ServiceDesk]
"""
url = self._options['server'] + '/rest/servicedeskapi/servicedesk'
headers = {'X-ExperimentalApi': 'opt-in'}
r_json = json_loads(self._session.get(url, headers=headers))
projects = [ServiceDesk(self._options, self._session, raw_project_json)
for raw_project_json in r_json['values']]
return projects
def service_desk(self, id):
"""Get a Service Desk Resource from the server.
:param id: ID or key of the Service Desk to get
:type id: str
:rtype: ServiceDesk
"""
return self._find_for_resource(ServiceDesk, id)
def create_customer_request(self, fields=None, prefetch=True, **fieldargs):
"""Create a new customer request and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments
will be ignored
:type fields: Dict[str, Any]
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value
returned from this method
:type prefetch: bool
:rtype: Issue
"""
data = fields
p = data['serviceDeskId']
service_desk = None
if isinstance(p, string_types) or isinstance(p, integer_types):
service_desk = self.service_desk(p)
elif isinstance(p, ServiceDesk):
service_desk = p
data['serviceDeskId'] = service_desk.id
p = data['requestTypeId']
if isinstance(p, integer_types):
data['requestTypeId'] = p
elif isinstance(p, string_types):
data['requestTypeId'] = self.request_type_by_name(
service_desk, p).id
url = self._options['server'] + '/rest/servicedeskapi/request'
headers = {'X-ExperimentalApi': 'opt-in'}
r = self._session.post(url, headers=headers, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'issueKey' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['issueKey'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
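# A usage sketch for create_customer_request() above; `jira` is assumed to be
# an authenticated client of a JIRA Service Desk instance, and the service
# desk id and request type name are placeholders.
def _example_create_customer_request(jira):
    fields = {
        'serviceDeskId': 1,                    # or a ServiceDesk resource
        'requestTypeId': 'Get IT help',        # resolved by name if a string
        'requestFieldValues': {
            'summary': 'Laptop will not boot',
            'description': 'The screen stays black after power-on.',
        },
    }
    return jira.create_customer_request(fields=fields)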
def createmeta(self,
projectKeys=None,
projectIds=[],
issuetypeIds=None,
issuetypeNames=None,
expand=None,
):
"""Get the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with.
Can be a single value or a comma-delimited string. May be combined
with projectIds.
:type projectKeys: Union[None, Tuple[str, str], str]
:param projectIds: IDs of the projects to filter the results with. Can
be a single value or a comma-delimited string. May be combined with
projectKeys.
:type projectIds: Union[List, Tuple[str, str]]
:param issuetypeIds: IDs of the issue types to filter the results with.
Can be a single value or a comma-delimited string. May be combined
with issuetypeNames.
:type issuetypeIds: Optional[List[str]]
:param issuetypeNames: Names of the issue types to filter the results
with. Can be a single value or a comma-delimited string. May be
combined with issuetypeIds.
:type issuetypeNames: Optional[str]
:param expand: extra information to fetch inside each resource.
:type expand: Optional[str]
:rtype: Dict[str, Any]
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee_id):
"""Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue ID or key to assign
:type issue: int or str
:param assignee_id: the username (Jira Server) or accountId (Jira Cloud) of the user to assign the issue to
:type assignee_id: str
:rtype: bool
"""
url = self._options['server'] + \
'/rest/api/latest/issue/' + str(issue) + '/assignee'
payload = {}
if self.deploymentType == "Cloud":
payload['accountId'] = assignee_id
else:
payload["name"] = assignee_id
r = self._session.put(
url, data=json.dumps(payload))
raise_on_error(r)
return True
@translate_resource_args
def comments(self, issue):
"""Get a list of comment Resources.
:param issue: the issue to get comments from
:type issue: str
:rtype: List[Comment]
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None, is_internal=False):
"""Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:type issue: str
:param body: Text of the comment to add
:type body: str
:param visibility: a dict containing two entries: "type" and "value".
"type" is 'role' (or 'group' if the JIRA server has configured
comment visibility for groups) and 'value' is the name of the role
(or group) to which viewing of this comment will be restricted.
:type visibility: Optional[Dict[str, str]]
:param is_internal: Defines whether a comment has to be marked as 'Internal' in Jira Service Desk (Default: False)
:type is_internal: bool
:rtype: Comment
"""
data = {
'body': body,
}
if is_internal:
data.update({
'properties': [
{'key': 'sd.public.comment',
'value': {'internal': is_internal}}
]
})
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data)
)
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
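# A usage sketch for add_comment() above; `jira` is assumed to be an
# authenticated client and PROJ-123 an existing issue key.
def _example_add_comment(jira):
    return jira.add_comment(
        'PROJ-123',
        'Fix deployed to staging, please verify.',
        visibility={'type': 'role', 'value': 'Developers'},  # restrict visibility to a role
    )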
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""Get the edit metadata for an issue.
:param issue: the issue to get metadata for
:rtype: Dict[str, Dict[str, Dict[str, Any]]]
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
# removed the @translate_resource_args because it prevents us from finding
# information for building a proper link
def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
"""Add a remote link from an issue to an external application and returns a remote link Resource for it.
``destination`` should be a dict containing at least ``url`` to the linked external URL and
``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param destination: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
"""
try:
applicationlinks = self.applicationlinks()
except JIRAError as e:
applicationlinks = []
# In many (if not most) configurations, non-admin users are
# not allowed to list applicationlinks; if we aren't allowed,
# let's let people try to add remote links anyway, we just
# won't be able to be quite as helpful.
warnings.warn(
"Unable to gather applicationlinks; you will not be able "
"to add links to remote issues: (%s) %s" % (
e.status_code,
e.text),
Warning)
data = {}
if isinstance(destination, Issue):
data['object'] = {
'title': str(destination),
'url': destination.permalink()}
for x in applicationlinks:
if x['application']['displayUrl'] == destination._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
if 'globalId' not in data:
raise NotImplementedError(
"Unable to identify the issue to link to.")
else:
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
data['object'] = destination
if relationship is not None:
data['relationship'] = relationship
# check if the link comes from one of the configured application links
for x in applicationlinks:
if x['application']['displayUrl'] == self._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
remote_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return remote_link
def add_simple_link(self, issue, object):
"""Add a simple remote link from an issue to web resource.
This avoids the admin access problems from add_remote_link by just
using a simple object and presuming all fields are correct and not
requiring more complex ``application`` data.
``object`` should be a dict containing at least ``url`` to the
linked external URL and ``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param object: the dictionary used to create remotelink data
"""
data = {"object": object}
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
simple_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return simple_link
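# A usage sketch for add_simple_link() above; `jira` is assumed to be an
# authenticated client, and the linked URL is a placeholder.
def _example_add_simple_link(jira):
    link = {'url': 'https://ci.example.com/build/42', 'title': 'CI build #42'}
    return jira.add_simple_link('PROJ-123', link)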
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
:param transition_name: name of the transition we are looking for
"""
transitions_json = self.transitions(issue)
id = None
for transition in transitions_json:
if transition["name"].lower() == transition_name.lower():
id = transition["id"]
break
return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, worklog=None, **fieldargs):
"""Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when
performing the transition.
:param fields: a dict containing field names and the values to use.
If present, all other keyword arguments will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except Exception:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId}}
if comment:
data.setdefault('update', {})['comment'] = [{'add': {'body': comment}}]
if worklog:
# setdefault keeps a comment update added above instead of overwriting it
data.setdefault('update', {})['worklog'] = [{'add': {'timeSpent': worklog}}]
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
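# A usage sketch for transition_issue() above; `jira` is assumed to be an
# authenticated client, and the transition and resolution names depend on the
# workflow configured for the project.
def _example_resolve_issue(jira):
    return jira.transition_issue(
        'PROJ-123',
        'Done',                                    # transition name or id
        comment='Fixed in release 1.4.2',
        fields={'resolution': {'name': 'Fixed'}},
    )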
@translate_resource_args
def votes(self, issue):
"""Get a votes Resource from the server.
:param issue: ID or key of the issue to get the votes for
:rtype: Votes
"""
return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
"""Register a vote for the current authenticated user on an issue.
:param issue: ID or key of the issue to vote on
:rtype: Response
"""
url = self._get_url('issue/' + str(issue) + '/votes')
return self._session.post(url)
@translate_resource_args
def remove_vote(self, issue):
"""Remove the current authenticated user's vote from an issue.
:param issue: ID or key of the issue to remove vote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
self._session.delete(url)
@translate_resource_args
def watchers(self, issue):
"""Get a watchers Resource from the server for an issue.
:param issue: ID or key of the issue to get the watchers for
:rtype: Watchers
"""
return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher_id):
"""Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher_id: username (Jira Server) or accountId (Jira Cloud) of the user to add to the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher_id))
@translate_resource_args
def remove_watcher(self, issue, watcher_id):
"""Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher_id: username (Jira Server) or accountId (Jira Cloud) of the user to remove from the watchers list
:rtype: Response
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {}
if self.deploymentType == "Cloud":
params['accountId'] = watcher_id
else:
params["username"] = watcher_id
result = self._session.delete(url, params=params)
return result
@translate_resource_args
def worklogs(self, issue):
"""Get a list of worklog Resources from the server for an issue.
:param issue: ID or key of the issue to get worklogs from
:rtype: List[Worklog]
"""
r_json = self._get_json('issue/' + str(issue) + '/worklog')
worklogs = [Worklog(self._options, self._session, raw_worklog_json)
for raw_worklog_json in r_json['worklogs']]
return worklogs
@translate_resource_args
def worklog(self, issue, id):
"""Get a specific worklog Resource from the server.
:param issue: ID or key of the issue to get the worklog from
:param id: ID of the worklog to get
:rtype: Worklog
"""
return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self,
issue,
timeSpent=None,
timeSpentSeconds=None,
adjustEstimate=None,
newEstimate=None,
reduceBy=None,
comment=None,
started=None,
user=None,
):
"""Add a new worklog entry on an issue and return a Resource for it.
:param issue: the issue to add the worklog to
:param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
:param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining
time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
:param newEstimate: the new value for the remaining estimate field. e.g. "2d"
:param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
:param started: moment when the work is logged; if not specified, defaults to now
:param comment: optional worklog comment
:rtype: Worklog
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if reduceBy is not None:
params['reduceBy'] = reduceBy
data = {}
if timeSpent is not None:
data['timeSpent'] = timeSpent
if timeSpentSeconds is not None:
data['timeSpentSeconds'] = timeSpentSeconds
if comment is not None:
data['comment'] = comment
elif user:
# also record the user inside the comment, because the author fields set below are not always honoured
data['comment'] = user
if started is not None:
# based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
if started.tzinfo is None:
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000+0000")
else:
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
if user is not None:
data['author'] = {"name": user,
'self': self.JIRA_BASE_URL + '/rest/api/latest/user?username=' + user,
'displayName': user,
'active': False
}
data['updateAuthor'] = data['author']
# report bug to Atlassian: author and updateAuthor parameters are
# ignored.
url = self._get_url('issue/{0}/worklog'.format(issue))
r = self._session.post(url, params=params, data=json.dumps(data))
return Worklog(self._options, self._session, json_loads(r))
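# A usage sketch for add_worklog() above; `jira` is assumed to be an
# authenticated client and PROJ-123 an existing issue key.
def _example_log_work(jira):
    from datetime import datetime
    return jira.add_worklog(
        'PROJ-123',
        timeSpent='2h',
        comment='Code review and pairing',
        started=datetime.now(),   # defaults to now when omitted
    )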
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
"""Create a link between two issues.
:param type: the type of link to create
:param inwardIssue: the issue to link from
:param outwardIssue: the issue to link to
:param comment: a comment to add to the issues with the link. Should be
a dict containing ``body`` and ``visibility`` fields: ``body`` being
the text of the comment and ``visibility`` being a dict containing
two entries: ``type`` and ``value``. ``type`` is ``role`` (or
``group`` if the JIRA server has configured comment visibility for
groups) and ``value`` is the name of the role (or group) to which
viewing of this comment will be restricted.
:type comment: Optional[Dict[str, Any]]
:rtype: Response
"""
# let's see if we have the right issue link 'type' and fix it if needed
issue_link_types = self.issue_link_types()
if type not in issue_link_types:
for lt in issue_link_types:
if lt.outward == type:
# the outward description was given, so use the link type's canonical name
type = lt.name
break
elif lt.inward == type:
# the inward description was given, so use the canonical name and swap the issues
type = lt.name
inwardIssue, outwardIssue = outwardIssue, inwardIssue
break
data = {
'type': {
'name': type},
'inwardIssue': {
'key': inwardIssue},
'outwardIssue': {
'key': outwardIssue},
'comment': comment}
url = self._get_url('issueLink')
return self._session.post(
url, data=json.dumps(data))
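# A usage sketch for create_issue_link() above; `jira` is assumed to be an
# authenticated client, and the link type name must exist on the server
# ('Duplicate' ships with JIRA by default).
def _example_link_issues(jira):
    return jira.create_issue_link(
        type='Duplicate',
        inwardIssue='PROJ-123',
        outwardIssue='PROJ-124',
    )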
def delete_issue_link(self, id):
"""Delete a link between two issues.
:param id: ID of the issue link to delete
"""
url = self._get_url('issueLink') + "/" + id
return self._session.delete(url)
def issue_link(self, id):
"""Get an issue link Resource from the server.
:param id: ID of the issue link to get
"""
return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self, force=False):
"""Get a list of issue link type Resources from the server.
:rtype: List[IssueLinkType]
"""
if not hasattr(self, '_cached_issue_link_types') or force:
r_json = self._get_json('issueLinkType')
self._cached_issue_link_types = [
IssueLinkType(self._options, self._session, raw_link_json)
for raw_link_json in r_json['issueLinkTypes']
]
return self._cached_issue_link_types
def issue_link_type(self, id):
"""Get an issue link type Resource from the server.
:param id: ID of the issue link type to get
:type id: str
:rtype: IssueLinkType
"""
return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
"""Get a list of issue type Resources from the server.
:rtype: List[IssueType]
"""
r_json = self._get_json('issuetype')
issue_types = [IssueType(
self._options, self._session, raw_type_json) for raw_type_json in r_json]
return issue_types
def issue_type(self, id):
"""Get an issue type Resource from the server.
:param id: ID of the issue type to get
:rtype: IssueType
"""
return self._find_for_resource(IssueType, id)
def issue_type_by_name(self, name):
"""
:param name: Name of the issue type
:type name: str
:rtype: IssueType
"""
issue_types = self.issue_types()
try:
issue_type = [it for it in issue_types if it.name == name][0]
except IndexError:
raise KeyError("Issue type '%s' is unknown." % name)
return issue_type
def request_types(self, service_desk):
""" Returns request types supported by a service desk instance.
:param service_desk: The service desk instance.
:type service_desk: ServiceDesk
:rtype: List[RequestType]
"""
if hasattr(service_desk, 'id'):
service_desk = service_desk.id
url = (self._options['server'] +
'/rest/servicedeskapi/servicedesk/%s/requesttype'
% service_desk)
headers = {'X-ExperimentalApi': 'opt-in'}
r_json = json_loads(self._session.get(url, headers=headers))
request_types = [
RequestType(self._options, self._session, raw_type_json)
for raw_type_json in r_json['values']]
return request_types
def request_type_by_name(self, service_desk, name):
request_types = self.request_types(service_desk)
try:
request_type = [rt for rt in request_types if rt.name == name][0]
except IndexError:
raise KeyError("Request type '%s' is unknown." % name)
return request_type
# User permissions
# non-resource
def my_permissions(self,
projectKey=None,
projectId=None,
issueKey=None,
issueId=None,
):
"""Get a dict of all available permissions on the server.
:param projectKey: limit returned permissions to the specified project
:type projectKey: Optional[str]
:param projectId: limit returned permissions to the specified project
:type projectId: Optional[str]
:param issueKey: limit returned permissions to the specified issue
:type issueKey: Optional[str]
:param issueId: limit returned permissions to the specified issue
:type issueId: Optional[str]
:rtype: Dict[str, Dict[str, Dict[str, str]]]
"""
params = {}
if projectKey is not None:
params['projectKey'] = projectKey
if projectId is not None:
params['projectId'] = projectId
if issueKey is not None:
params['issueKey'] = issueKey
if issueId is not None:
params['issueId'] = issueId
return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
"""Get a list of priority Resources from the server.
:rtype: List[Priority]
"""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities
def priority(self, id):
"""Get a priority Resource from the server.
:param id: ID of the priority to get
:type id: str
:rtype: Priority
"""
return self._find_for_resource(Priority, id)
# Projects
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user.
:rtype: List[Project]
"""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects
def project(self, id):
"""Get a project Resource from the server.
:param id: ID or key of the project to get
:rtype: Project
"""
return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
"""Get a dict of all avatars for a project visible to the current authenticated user.
:param project: ID or key of the project to get avatars for
"""
return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""Register an image file as a project avatar.
The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If
you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'
argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method
returns.
:param project: ID or key of the project to create the avatar in
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object holding the avatar
:param contentType: explicit specification for the avatar image's content-type
:param auto_confirm: whether to automatically confirm the temporary avatar by calling
:py:meth:`confirm_project_avatar` with the return value of this method. (Default: False)
:type auto_confirm: bool
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'filename': filename,
'size': size}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('project/' + project + '/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_project_avatar(project, cropping_properties)
else:
return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
"""Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_project_avatar`, use this method to confirm the avatar
for use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_project_avatar` should be used for this
argument.
:param project: ID or key of the project to confirm the avatar in
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_project_avatar`
"""
data = cropping_properties
url = self._get_url('project/' + project + '/avatar')
r = self._session.post(
url, data=json.dumps(data))
return json_loads(r)
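# A usage sketch of the two-step avatar flow above (upload a temporary avatar,
# then confirm its cropping); `jira` is assumed to be an authenticated client
# with project-admin rights on PROJ, and logo.png a local image file.
def _example_set_avatar_from_file(jira):
    import os
    with open('logo.png', 'rb') as img:
        cropping = jira.create_temp_project_avatar(
            'PROJ', 'logo.png', os.path.getsize('logo.png'), img)
    return jira.confirm_project_avatar('PROJ', cropping)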
@translate_resource_args
def set_project_avatar(self, project, avatar):
"""Set a project's avatar.
:param project: ID or key of the project to set the avatar on
:param avatar: ID of the avatar to set
"""
self._set_avatar(
None, self._get_url('project/' + project + '/avatar'), avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
"""Delete a project's avatar.
:param project: ID or key of the project to delete the avatar from
:param avatar: ID of the avatar to delete
"""
url = self._get_url('project/' + project + '/avatar/' + avatar)
return self._session.delete(url)
@translate_resource_args
def project_components(self, project):
"""Get a list of component Resources present on a project.
:param project: ID or key of the project to get components from
:type project: str
:rtype: List[Component]
"""
r_json = self._get_json('project/' + project + '/components')
components = [Component(
self._options, self._session, raw_comp_json) for raw_comp_json in r_json]
return components
@translate_resource_args
def project_versions(self, project):
"""Get a list of version Resources present on a project.
:param project: ID or key of the project to get versions from
:type project: str
:rtype: List[Version]
"""
r_json = self._get_json('project/' + project + '/versions')
versions = [
Version(self._options, self._session, raw_ver_json) for raw_ver_json in r_json]
return versions
@translate_resource_args
def get_project_version_by_name(self, project, version_name):
"""Get a version Resource by its name present on a project.
:param project: ID or key of the project to get versions from
:type project: str
:param version_name: name of the version to search for
:type version_name: str
:rtype: Optional[Version]
"""
versions = self.project_versions(project)
for version in versions:
if version.name == version_name:
return version
@translate_resource_args
def rename_version(self, project, old_name, new_name):
"""Rename a version Resource on a project.
:param project: ID or key of the project to get versions from
:type project: str
:param old_name: old name of the version to rename
:type old_name: str
:param new_name: new name of the version to rename
:type new_name: str
:rtype: None
"""
version = self.get_project_version_by_name(project, old_name)
if version:
version.update(name=new_name)
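# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client and a project 'PROJ' with a version named '1.0':
#
#     jira.rename_version('PROJ', '1.0', '1.0.1')
#
# Internally this is get_project_version_by_name() followed by Version.update(name=...),
# so nothing happens if no version named '1.0' exists.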
# non-resource
@translate_resource_args
def project_roles(self, project):
"""Get a dict of role names to resource locations for a project.
:param project: ID or key of the project to get roles from
"""
path = 'project/' + project + '/role'
_rolesdict = self._get_json(path)
rolesdict = {}
for k, v in _rolesdict.items():
tmp = {}
tmp['id'] = v.split("/")[-1]
tmp['url'] = v
rolesdict[k] = tmp
return rolesdict
# TODO(ssbarnea): return a list of Roles()
@translate_resource_args
def project_role(self, project, id):
"""Get a role Resource.
:param project: ID or key of the project to get the role from
:param id: ID of the role to get
"""
if isinstance(id, Number):
id = "%s" % id
return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
"""Get a list of resolution Resources from the server.
:rtype: List[Resolution]
"""
r_json = self._get_json('resolution')
resolutions = [Resolution(
self._options, self._session, raw_res_json) for raw_res_json in r_json]
return resolutions
def resolution(self, id):
"""Get a resolution Resource from the server.
:param id: ID of the resolution to get
:type id: str
:rtype: Resolution
"""
return self._find_for_resource(Resolution, id)
# Search
def search_issues(self,
jql_str,
startAt=0,
maxResults=50,
validate_query=True,
fields=None,
expand=None,
json_result=None,
):
"""Get a :class:`~jira.client.ResultList` of issue Resources matching a JQL search string.
:param jql_str: The JQL search string.
:type jql_str: str
:param startAt: Index of the first issue to return. (Default: 0)
:type startAt: int
:param maxResults: Maximum number of issues to return. Total number of results
is available in the ``total`` attribute of the returned :class:`~jira.client.ResultList`.
If maxResults evaluates as False, it will try to get all issues in batches. (Default: 50)
:type maxResults: int
:param validate_query: Whether or not the query should be validated. (Default: True)
:type validate_query: bool
:param fields: comma-separated string or list of issue fields to include in the results.
Default is to include all fields.
:type fields: Optional[str or list]
:param expand: extra information to fetch inside each resource
:type expand: Optional[str]
:param json_result: JSON response will be returned when this parameter is set to True.
Otherwise, :class:`~jira.client.ResultList` will be returned.
:type json_result: bool
:rtype: dict or :class:`~jira.client.ResultList`
"""
if isinstance(fields, string_types):
fields = fields.split(",")
else:
fields = list(fields or [])
# this will translate JQL field names to REST API names
# most people know the JQL names, so this helps them use the API more easily
untranslate = {} # use to add friendly aliases when we get the results back
if self._fields:
for i, field in enumerate(fields):
if field in self._fields:
untranslate[self._fields[field]] = fields[i]
fields[i] = self._fields[field]
search_params = {
"jql": jql_str,
"startAt": startAt,
"validateQuery": validate_query,
"fields": fields,
"expand": expand}
if json_result:
search_params["maxResults"] = maxResults
if not maxResults:
warnings.warn('All issues cannot be fetched at once, when json_result parameter is set', Warning)
return self._get_json('search', params=search_params)
issues = self._fetch_pages(Issue, 'issues', 'search', startAt, maxResults, search_params)
if untranslate:
for i in issues:
for k, v in iteritems(untranslate):
if k in i.raw.get('fields', {}):
i.raw['fields'][v] = i.raw['fields'][k]
return issues
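# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client and a project key 'PROJ':
#
#     issues = jira.search_issues('project = PROJ AND status = "In Progress"',
#                                 maxResults=20, fields='summary,assignee')
#     for issue in issues:
#         print(issue.key, issue.fields.summary)
#
# With maxResults=False the client pages through all matching issues; with json_result=True
# the raw search JSON is returned instead of a ResultList.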
# Security levels
def security_level(self, id):
"""Get a security level Resource.
:param id: ID of the security level to get
"""
return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
"""Get a dict of server information for this JIRA instance.
:rtype: Dict[str, Any]
"""
retry = 0
j = self._get_json('serverInfo')
while not j and retry < 3:
logging.warning("Bug https://jira.atlassian.com/browse/JRA-59676 trying again...")
retry += 1
j = self._get_json('serverInfo')
return j
def myself(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('myself')
# Status
def statuses(self):
"""Get a list of status Resources from the server.
:rtype: List[Status]
"""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses
def status(self, id):
# type: (str) -> Status
"""Get a status Resource from the server.
:param id: ID of the status resource to get
"""
return self._find_for_resource(Status, id)
# Category
def statuscategories(self):
"""Get a list of status category Resources from the server.
:rtype: List[StatusCategory]
"""
r_json = self._get_json('statuscategory')
statuscategories = [StatusCategory(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuscategories
def statuscategory(self, id):
"""Get a status category Resource from the server.
:param id: ID of the status category resource to get
:type id: int
:rtype: StatusCategory
"""
return self._find_for_resource(StatusCategory, id)
# Users
def user(self, id, expand=None):
"""Get a user Resource from the server.
:param id: ID of the user to get
:type id: str
:param expand: Extra information to fetch inside each resource
:type expand: Optional[Any]
:rtype: User
"""
self._options["deployment_type"] = self.deploymentType
user = User(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
user.find(id, params=params)
return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
"""Get a list of user Resources that match the search string and can be assigned issues for projects.
:param username: A string to match usernames against
:type username: str
:param projectKeys: Comma-separated list of project keys to check for issue assignment permissions
:type projectKeys: str
:param startAt: Index of the first user to return (Default: 0)
:type startAt: int
:param maxResults: Maximum number of users to return.
If maxResults evaluates as False, it will try to get all users in batches. (Default: 50)
:type maxResults: int
:rtype: ResultList
"""
params = {
'username': username,
'projectKeys': projectKeys}
return self._fetch_pages(User, None, 'user/assignable/multiProjectSearch', startAt, maxResults, params)
def search_assignable_users_for_issues(self,
username,
project=None,
issueKey=None,
expand=None,
startAt=0,
maxResults=50,
):
"""Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: A string to match usernames against
:type username: str
:param project: Filter returned users by permission in this project (expected if a result will be used to
create an issue)
:type project: Optional[str]
:param issueKey: Filter returned users by this issue (expected if a result will be used to edit this issue)
:type issueKey: Optional[str]
:param expand: Extra information to fetch inside each resource
:type expand: Optional[Any]
:param startAt: Index of the first user to return (Default: 0)
:type startAt: int
:param maxResults: maximum number of users to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 50)
:rtype: ResultList
"""
# "username" is deprecated for the Jira Cloud API and "query" doesn't work for Jira Server (it returns all users)
params = {}
if self.deploymentType == 'Cloud':
params['query'] = username
else:
params['username'] = username
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
return self._fetch_pages(User, None, 'user/assignable/search', startAt, maxResults, params)
# non-resource
def user_avatars(self, username):
"""Get a dict of avatars for the specified user.
:param username: the username to get avatars for
"""
return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self,
user,
filename,
size,
avatar_img,
contentType=None,
auto_confirm=False,
):
"""Register an image file as a user avatar.
The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
returns.
:param user: User to register the avatar for
:type user: str
:param filename: name of the avatar file
:type filename: str
:param size: size of the avatar file
:type size: int
:param avatar_img: file-like object containing the avatar
:type avatar_img: bytes
:param contentType: explicit specification for the avatar image's content-type
:type contentType: Optional[Any]
:param auto_confirm: whether to automatically confirm the temporary avatar by calling
:py:meth:`confirm_user_avatar` with the return value of this method. (Default: False)
:type auto_confirm: bool
:rtype: NoReturn
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
# remove path from filename
filename = os.path.split(filename)[1]
params = {
'username': user,
'filename': filename,
'size': size}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('user/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_user_avatar(user, cropping_properties)
else:
return cropping_properties
def confirm_user_avatar(self, user, cropping_properties):
"""Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_user_avatar`, use this method to confirm the avatar for
use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_user_avatar` should be used for this
argument.
:param user: the user to confirm the avatar for
:type user: str
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_user_avatar`
:type cropping_properties: Dict[str,Any]
"""
data = cropping_properties
url = self._get_url('user/avatar')
r = self._session.post(url, params={'username': user},
data=json.dumps(data))
return json_loads(r)
def set_user_avatar(self, username, avatar):
"""Set a user's avatar.
:param username: the user to set the avatar for
:param avatar: ID of the avatar to set
"""
self._set_avatar(
{'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
"""Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
return self._session.delete(url, params=params)
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
"""Get a list of user Resources that match the specified search string.
:param user: a string to match usernames, names, or email addresses against.
:type user: str
:param startAt: index of the first user to return.
:type startAt: int
:param maxResults: maximum number of users to return.
If maxResults evaluates as False, it will try to get all items in batches.
:type maxResults: int
:param includeActive: If true, then active users are included in the results. (Default: True)
:type includeActive: bool
:param includeInactive: If true, then inactive users are included in the results. (Default: False)
:type includeInactive: bool
:rtype: ResultList
"""
params = {
'username': user,
'includeActive': includeActive,
'includeInactive': includeInactive}
return self._fetch_pages(User, None, 'user/search', startAt, maxResults, params)
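# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client:
#
#     users = jira.search_users('jane', maxResults=10, includeInactive=True)
#     names = [u.displayName for u in users]
#
# `displayName` is the attribute usually exposed on User resources; adjust if your JIRA
# deployment returns different fields.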
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
"""Get a list of user Resources that match a username string and have browse permission for the issue or project.
:param user: a string to match usernames against.
:type user: str
:param issueKey: find users with browse permission for this issue.
:type issueKey: Optional[str]
:param projectKey: find users with browse permission for this project.
:type projectKey: Optional[str]
:param startAt: index of the first user to return. (Default: 0)
:type startAt: int
:param maxResults: maximum number of users to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 50)
:type maxResults: int
"""
params = {
'username': user}
if issueKey is not None:
params['issueKey'] = issueKey
if projectKey is not None:
params['projectKey'] = projectKey
return self._fetch_pages(User, None, 'user/viewissue/search', startAt, maxResults, params)
# Versions
@translate_resource_args
def create_version(self,
name,
project,
description=None,
releaseDate=None,
startDate=None,
archived=False,
released=False,
):
"""Create a version in a project and return a Resource for it.
:param name: name of the version to create
:type name: str
:param project: key of the project to create the version in
:type project: str
:param description: a description of the version
:type description: str
:param releaseDate: the release date assigned to the version
:type releaseDate: Optional[Any]
:param startDate: The start date for the version
:type startDate: Optional[Any]
:param archived: Denotes whether a version should be archived. (Default: False)
:type archived: bool
:param released: Denotes whether a version is released. (Default: False)
:type released: bool
:rtype: Version
"""
data = {
'name': name,
'project': project,
'archived': archived,
'released': released}
if description is not None:
data['description'] = description
if releaseDate is not None:
data['releaseDate'] = releaseDate
if startDate is not None:
data['startDate'] = startDate
url = self._get_url('version')
r = self._session.post(
url, data=json.dumps(data))
time.sleep(1)
version = Version(self._options, self._session, raw=json_loads(r))
return version
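# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client and a project key 'PROJ':
#
#     version = jira.create_version('1.2.0', 'PROJ',
#                                   description='First public release',
#                                   releaseDate='2020-01-31')
#     print(version.id, version.name)
#
# The date format is an assumption; JIRA typically expects 'YYYY-MM-DD' for releaseDate.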
def move_version(self, id, after=None, position=None):
"""Move a version within a project's ordered version list and return a new version Resource for it.
One, but not both, of ``after`` and ``position`` must be specified.
:param id: ID of the version to move
:param after: the self attribute of a version to place the specified version after (that is, higher in the list)
:param position: the absolute position to move this version to: must be one of ``First``, ``Last``,
``Earlier``, or ``Later``
"""
data = {}
if after is not None:
data['after'] = after
elif position is not None:
data['position'] = position
url = self._get_url('version/' + id + '/move')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
def version(self, id, expand=None):
"""Get a version Resource.
:param id: ID of the version to get
:type id: str
:param expand: extra information to fetch inside each resource
:type expand: Optional[Any]
:rtype: Version
"""
version = Version(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
version.find(id, params=params)
return version
def version_count_related_issues(self, id):
"""Get a dict of the counts of issues fixed and affected by a version.
:param id: the version to count issues for
"""
r_json = self._get_json('version/' + id + '/relatedIssueCounts')
del r_json['self'] # this isn't really an addressable resource
return r_json
def version_count_unresolved_issues(self, id):
"""Get the number of unresolved issues for a version.
:param id: ID of the version to count issues for
"""
return self._get_json('version/' + id + '/unresolvedIssueCount')['issuesUnresolvedCount']
# Session authentication
def session(self, auth=None):
"""Get a dict of the current authenticated user's session information.
:param auth: Tuple of username and password.
:type auth: Optional[Tuple[str,str]]
:rtype: User
"""
url = '{server}{auth_url}'.format(**self._options)
if isinstance(self._session.auth, tuple) or auth:
if not auth:
auth = self._session.auth
username, password = auth
authentication_data = {'username': username, 'password': password}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user
def kill_session(self):
"""Destroy the session of the current authenticated user."""
url = self._options['server'] + '/rest/auth/latest/session'
return self._session.delete(url)
# Websudo
def kill_websudo(self):
"""Destroy the user's current WebSudo session.
Works only for non-cloud deployments, for others does nothing.
:rtype: Optional[Any]
"""
if self.deploymentType != 'Cloud':
url = self._options['server'] + '/rest/auth/1/websudo'
return self._session.delete(url)
# Utilities
def _create_http_basic_session(self, username, password, timeout=None):
""" Creates a basic http session.
:param username: Username for the session
:type username: str
:param password: Password for the username
:type password: str
:param timeout: If set determines the timeout period for the Session.
:type timeout: Optional[int]
:rtype: NoReturn
"""
verify = self._options['verify']
self._session = ResilientSession(timeout=timeout)
self._session.verify = verify
self._session.auth = (username, password)
self._session.cert = self._options['client_cert']
def _create_oauth_session(self, oauth, timeout):
verify = self._options['verify']
from oauthlib.oauth1 import SIGNATURE_RSA
from requests_oauthlib import OAuth1
oauth = OAuth1(
oauth['consumer_key'],
rsa_key=oauth['key_cert'],
signature_method=SIGNATURE_RSA,
resource_owner_key=oauth['access_token'],
resource_owner_secret=oauth['access_token_secret'])
self._session = ResilientSession(timeout)
self._session.verify = verify
self._session.auth = oauth
def _create_kerberos_session(self, timeout, kerberos_options=None):
verify = self._options['verify']
if kerberos_options is None:
kerberos_options = {}
from requests_kerberos import DISABLED
from requests_kerberos import HTTPKerberosAuth
from requests_kerberos import OPTIONAL
if kerberos_options.get('mutual_authentication', 'OPTIONAL') == 'OPTIONAL':
mutual_authentication = OPTIONAL
elif kerberos_options.get('mutual_authentication') == 'DISABLED':
mutual_authentication = DISABLED
else:
raise ValueError("Unknown value for mutual_authentication: %s" %
kerberos_options['mutual_authentication'])
self._session = ResilientSession(timeout=timeout)
self._session.verify = verify
self._session.auth = HTTPKerberosAuth(mutual_authentication=mutual_authentication)
@staticmethod
def _timestamp(dt=None):
t = datetime.datetime.utcnow()
if dt is not None:
t += dt
return calendar.timegm(t.timetuple())
def _create_jwt_session(self, jwt, timeout):
try:
jwt_auth = JWTAuth(jwt['secret'], alg='HS256')
except NameError as e:
logging.error("JWT authentication requires requests_jwt")
raise e
jwt_auth.set_header_format('JWT %s')
jwt_auth.add_field("iat", lambda req: JIRA._timestamp())
jwt_auth.add_field("exp", lambda req: JIRA._timestamp(datetime.timedelta(minutes=3)))
jwt_auth.add_field("qsh", QshGenerator(self._options['context_path']))
for f in jwt['payload'].items():
jwt_auth.add_field(f[0], f[1])
self._session = ResilientSession(timeout=timeout)
self._session.verify = self._options['verify']
self._session.auth = jwt_auth
def _set_avatar(self, params, url, avatar):
data = {
'id': avatar}
return self._session.put(url, params=params, data=json.dumps(data))
def _get_url(self, path, base=JIRA_BASE_URL):
""" Returns the full url based on JIRA base url and the path provided
:param path: The subpath desired.
:type path: str
:param base: The base url which should be prepended to the path
:type base: Optional[str]
:return: Fully qualified URL
:rtype: str
"""
options = self._options.copy()
options.update({'path': path})
return base.format(**options)
def _get_json(self,
path,
params=None,
base=JIRA_BASE_URL,
):
"""Get the json for a given path and params.
:param path: The subpath required
:type path: str
:param params: Parameters to filter the json query.
:type params: Optional[Dict[str, Any]]
:param base: The Base JIRA URL, defaults to the instance base.
:type base: Optional[str]
:rtype: Union[Dict[str, Any], List[Dict[str, str]]]
"""
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
def _find_for_resource(self, resource_cls, ids, expand=None):
resource = resource_cls(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
resource.find(id=ids, params=params)
if not resource:
raise JIRAError("Unable to find resource %s(%s)", resource_cls, ids)
return resource
def _try_magic(self):
try:
import magic
import weakref
except ImportError:
self._magic = None
else:
try:
_magic = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
def cleanup(x):
_magic.close()
self._magic_weakref = weakref.ref(self, cleanup)
self._magic = _magic
except TypeError:
self._magic = None
except AttributeError:
self._magic = None
def _get_mime_type(self, buff):
"""Get the MIME type for a given stream of bytes
:param buff: Stream of bytes
:type buff: bytes
:rtype: str
"""
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None
def rename_user(self, old_user, new_user):
"""Rename a JIRA user.
:param old_user: Old username login
:type old_user: str
:param new_user: New username login
:type new_user: str
"""
if self._version > (6, 0, 0):
url = self._options['server'] + '/rest/api/latest/user'
payload = {
"name": new_user}
params = {
'username': old_user}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.put(url, params=params,
data=json.dumps(payload))
raise_on_error(r)
else:
raise NotImplementedError("Support for renaming users in Jira "
"< 6.0.0 has been removed.")
def delete_user(self, username):
"""Deletes a JIRA User.
:param username: Username to delete
:type username: str
:return: Success of user deletion
:rtype: bool
"""
url = self._options['server'] + '/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def deactivate_user(self, username):
"""Disable/deactivate the user.
:param username: User to be deactivated.
:type username: str
:rtype: Union[str, int]
"""
if self.deploymentType == 'Cloud':
# Disabling users now needs cookie auth in the Cloud - see https://jira.atlassian.com/browse/ID-6230
if 'authCookie' not in vars(self):
user = self.session()
if user.raw is None:
raise JIRAError("Can not log in!")
self.authCookie = '%s=%s' % (user.raw['session']['name'], user.raw['session']['value'])
url = self._options['server'] + '/admin/rest/um/1/user/deactivate?username=%s' % (username)
# We can't use our existing session here - this endpoint is fragile and objects to extra headers
try:
r = requests.post(url, headers={'Cookie': self.authCookie, 'Content-Type': 'application/json'},
proxies=self._session.proxies, data={})
if r.status_code == 200:
return True
else:
logging.warning(
'Got response from deactivating %s: %s' % (username, r.status_code))
return r.status_code
except Exception as e:
logging.error(
"Error Deactivating %s: %s" % (username, e))
raise JIRAError("Error Deactivating %s: %s" % (username, e))
else:
url = self._options['server'] + '/secure/admin/user/EditUser.jspa'
self._options['headers']['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
user = self.user(username)
userInfo = {
'inline': 'true',
'decorator': 'dialog',
'username': user.name,
'fullName': user.displayName,
'email': user.emailAddress,
'editName': user.name
}
try:
r = self._session.post(url, headers=self._options['headers'], data=userInfo)
if r.status_code == 200:
return True
else:
logging.warning(
'Got response from deactivating %s: %s' % (username, r.status_code))
return r.status_code
except Exception as e:
logging.error(
"Error Deactivating %s: %s" % (username, e))
raise JIRAError("Error Deactivating %s: %s" % (username, e))
def reindex(self, force=False, background=True):
"""Start jira re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn't say this is needed, False by default.
:param background: reindex in background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if r.text.find("To perform the re-index now, please go to the") == -1 and force is False:
return True
if r.text.find('All issues are being re-indexed') != -1:
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') != -1 or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
def backup(self, filename='backup.zip', attachments=False):
"""Will call jira export to backup as zipped xml. Returning with success does not mean that the backup process finished."""
if self.deploymentType == 'Cloud':
url = self._options['server'] + '/rest/backup/1/export/runbackup'
payload = json.dumps({"cbAttachments": attachments})
self._options['headers']['X-Requested-With'] = 'XMLHttpRequest'
else:
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
try:
r = self._session.post(url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
except Exception as e:
logging.error("I see %s", e)
def backup_progress(self):
"""Return status of cloud backup as a dict.
Is there a way to get progress for Server version?
"""
epoch_time = int(time.time() * 1000)
if self.deploymentType == 'Cloud':
url = self._options['server'] + '/rest/obm/1.0/getprogress?_=%i' % epoch_time
else:
logging.warning(
'This functionality is not available in Server version')
return None
r = self._session.get(
url, headers=self._options['headers'])
# This is weird. I used to get xml, but now I'm getting json
try:
return json.loads(r.text)
except Exception:
import defusedxml.ElementTree as etree
progress = {}
try:
root = etree.fromstring(r.text)
except etree.ParseError as pe:
logging.warning('Unable to find backup info. You probably need to initiate a new backup. %s' % pe)
return None
for k in root.keys():
progress[k] = root.get(k)
return progress
def backup_complete(self):
"""Return boolean based on 'alternativePercentage' and 'size' returned from backup_progress (cloud only)."""
if self.deploymentType != 'Cloud':
logging.warning(
'This functionality is not available in Server version')
return None
status = self.backup_progress()
perc_complete = int(re.search(r"\s([0-9]*)\s",
status['alternativePercentage']).group(1))
file_size = int(status['size'])
return perc_complete >= 100 and file_size > 0
def backup_download(self, filename=None):
"""Download backup file from WebDAV (cloud only)."""
if self.deploymentType != 'Cloud':
logging.warning(
'This functionality is not available in Server version')
return None
remote_file = self.backup_progress()['fileName']
local_file = filename or remote_file
url = self._options['server'] + '/webdav/backupmanager/' + remote_file
try:
logging.debug('Writing file to %s' % local_file)
with open(local_file, 'wb') as file:
try:
resp = self._session.get(url, headers=self._options['headers'], stream=True)
except Exception:
raise JIRAError()
if not resp.ok:
logging.error("Something went wrong with download: %s" % resp.text)
raise JIRAError(resp.text)
for block in resp.iter_content(1024):
file.write(block)
except JIRAError as je:
logging.error('Unable to access remote backup file: %s' % je)
except IOError as ioe:
logging.error(ioe)
return None
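# [Editor's note] Hypothetical usage sketch (not part of the original source) of the Cloud
# backup helpers above, assuming an authenticated `jira = JIRA(...)` client:
#
#     if jira.backup(attachments=True) is True:
#         while not jira.backup_complete():
#             time.sleep(60)   # poll until 'alternativePercentage' reaches 100
#         jira.backup_download('jira-backup.zip')
#
# On Server deployments backup_progress()/backup_complete()/backup_download() log a warning
# and return None, so this loop is only meaningful for Cloud.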
def current_user(self):
"""Returns the username or account-id of the current user. For anonymous
users it will return a value that evaluates as False.
:rtype: str
"""
if not hasattr(self, '_myself'):
url = self._get_url('myself')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
self._myself = r_json
logging.debug("%s %s", r_json, r.headers)
# if 'X-AACCOUNTID' in r.headers:
# r_json['username'] = r.headers['X-AACCOUNTID']
# elif 'x-ausername' in r.headers:
# r_json['username'] = r.headers['x-ausername']
# else:
# r_json['username'] = None
# del r_json['self'] # this isn't really an addressable resource
logging.debug(self._myself)
return self._myself['name']
def delete_project(self, pid):
"""Delete project from Jira.
:param pid: JIRA projectID or Project or slug
:type pid: str
:return: True if project was deleted
:rtype: bool
:raises JIRAError: If project not found or not enough permissions
:raises ValueError: If pid parameter is not Project, slug or ProjectID
"""
# allows us to call it with Project objects
if hasattr(pid, 'id'):
pid = pid.id
url = self._options['server'] + '/rest/api/2/project/%s' % pid
r = self._session.delete(url)
if r.status_code == 403:
raise JIRAError('Not enough permissions to delete project')
if r.status_code == 404:
raise JIRAError('Project not found in Jira')
return r.ok
def _gain_sudo_session(self, options, destination):
url = self._options['server'] + '/secure/admin/WebSudoAuthenticate.jspa'
if not self._session.auth:
self._session.auth = get_netrc_auth(url)
payload = {
'webSudoPassword': self._session.auth[1],
'webSudoDestination': destination,
'webSudoIsPost': 'true'}
payload.update(options)
return self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
@lru_cache(maxsize=None)
def templates(self):
url = self._options['server'] + \
'/rest/project-templates/latest/templates'
r = self._session.get(url)
data = json_loads(r)
templates = {}
if 'projectTemplatesGroupedByType' in data:
for group in data['projectTemplatesGroupedByType']:
for t in group['projectTemplates']:
templates[t['name']] = t
# pprint(templates.keys())
return templates
def create_project(self, key, name=None, assignee=None, type="software", template_name=None):
"""Create a project with the specified parameters.
:param key: Mandatory. Must match JIRA project key requirements, usually only 2-10 uppercase characters.
:type: str
:param name: If not specified it will use the key value.
:type name: Optional[str]
:param assignee: If not specified it will use current user.
:type assignee: Optional[str]
:param type: Determines the type of project that should be created.
:type type: Optional[str]
:param template_name: name of an existing project template to base the new project on.
If `template_name` is not specified, the first matching entry from the internal preference list of templates is used.
:type template_name: Optional[str]
:return: The parsed JSON of the newly created project; HTTP errors raise via ``raise_for_status``.
:rtype: Dict[str, Any]
"""
template_key = None
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
# preference list for picking a default template
possible_templates = [
'Scrum software development', # have Bug
'Agility', # cannot set summary
'Bug tracking',
'JIRA Classic',
'JIRA Default Schemes',
'Basic software development',
'Project management',
'Kanban software development',
'Task management',
'Basic', # does not have Bug
'Content Management',
'Customer service',
'Document Approval',
'IT Service Desk',
'Lead Tracking',
'Process management',
'Procurement',
'Recruitment',
]
templates = self.templates()
if not template_name:
template_name = next(t for t in possible_templates if t in templates)
template_key = templates[template_name]['projectTemplateModuleCompleteKey']
project_type_key = templates[template_name]['projectTypeKey']
# https://confluence.atlassian.com/jirakb/creating-a-project-via-rest-based-on-jira-default-schemes-744325852.html
# see https://confluence.atlassian.com/jirakb/creating-projects-via-rest-api-in-jira-963651978.html
payload = {'name': name,
'key': key,
'projectTypeKey': project_type_key,
'projectTemplateKey': template_key,
'lead': assignee,
'assigneeType': 'PROJECT_LEAD',
}
url = self._options['server'] + \
'/rest/api/2/project'
r = self._session.post(url, data=json.dumps(payload))
r.raise_for_status()
r_json = json_loads(r)
return r_json
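# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client with project-admin rights:
#
#     new_project = jira.create_project('DEMO', name='Demo project',
#                                       template_name='Kanban software development')
#
# If template_name is omitted, the first entry of possible_templates that exists on the
# server is used; the return value is the parsed JSON of the created project.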
def add_user(self,
username,
email,
directoryId=1,
password=None,
fullname=None,
notify=False,
active=True,
ignore_existing=False,
application_keys=None,
):
"""Create a new JIRA user.
:param username: the username of the new user
:type username: str
:param email: email address of the new user
:type email: str
:param directoryId: The directory ID the new user should be a part of (Default: 1)
:type directoryId: int
:param password: Optional, the password for the new user
:type password: Optional[str]
:param fullname: Optional, the full name of the new user
:type fullname: Optional[str]
:param notify: Whether or not to send a notification to the new user. (Default: False)
:type notify: bool
:param active: Whether or not to make the new user active upon creation. (Default: True)
:type active: bool
:param ignore_existing: Whether or not to ignore an existing user. (Default: False)
:type ignore_existing: bool
:param application_keys: Keys of products the user should have access to
:type application_keys: Optional[list]
:return: Whether or not the user creation was successful.
:rtype: bool
:raises JIRAError: If username already exists and `ignore_existing` has not been set to `True`.
"""
if not fullname:
fullname = username
# TODO(ssbarnea): default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
if notify:
x['notification'] = 'True'
if application_keys is not None:
x['applicationKeys'] = application_keys
payload = json.dumps(x)
try:
self._session.post(url, data=payload)
except JIRAError as e:
err = e.response.json()['errors']
if 'username' in err and err['username'] == 'A user with that username already exists.' and ignore_existing:
return True
raise e
return True
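# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client with user-admin rights:
#
#     jira.add_user('jdoe', 'jdoe@example.com', fullname='Jane Doe',
#                   password='s3cret', notify=True, ignore_existing=True)
#
# With ignore_existing=True a "username already exists" error is swallowed and True is
# returned; any other JIRAError is re-raised.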
def add_user_to_group(self, username, group):
"""Add a user to an existing group.
:param username: Username that will be added to specified group.
:type username: str
:param group: Group that the user will be added to.
:type group: str
:return: json response from Jira server for success or a value that evaluates as False in case of failure.
:rtype: Union[bool,Dict[str,Any]]
"""
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
r = json_loads(self._session.post(url, params=x, data=payload))
if 'name' not in r or r['name'] != group:
return False
else:
return r
def remove_user_from_group(self, username, groupname):
"""Remove a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
"""
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self, startAt=0, maxResults=50, type=None, name=None, projectKeyOrID=None):
"""Get a list of board resources.
:param startAt: The starting index of the returned boards. Base index: 0.
:param maxResults: The maximum number of boards to return per page. Default: 50
:param type: Filters results to boards of the specified type. Valid values: scrum, kanban.
:param name: Filters results to boards that match or partially match the specified name.
:param projectKeyOrID: Filters results to boards that match the specified project key or ID.
:rtype: ResultList[Board]
When old GreenHopper private API is used, paging is not enabled and all parameters are ignored.
"""
params = {}
if type:
params['type'] = type
if name:
params['name'] = name
if projectKeyOrID:
params['projectKeyOrId'] = projectKeyOrID
if self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
# Old, private API did not support pagination, all records were present in response,
# and no parameters were supported.
if startAt or maxResults or params:
warnings.warn('Old private GreenHopper API is used, all parameters will be ignored.', Warning)
r_json = self._get_json('rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json) for raw_boards_json in r_json['views']]
return ResultList(boards, 0, len(boards), len(boards), True)
else:
return self._fetch_pages(Board, 'values', 'board', startAt, maxResults, params, base=self.AGILE_BASE_URL)
@translate_resource_args
def sprints(self, board_id, extended=False, startAt=0, maxResults=50, state=None):
"""Get a list of sprint GreenHopperResources.
:param board_id: the board to get sprints from
:param extended: Used only by old GreenHopper API to fetch additional information like
startDate, endDate, completeDate, much slower because it requires an additional requests for each sprint.
New JIRA Agile API always returns this information without a need for additional requests.
:param startAt: the index of the first sprint to return (0 based)
:param maxResults: the maximum number of sprints to return
:param state: Filters results to sprints in specified states. Valid values: `future`, `active`, `closed`.
You can define multiple states separated by commas
:type board_id: int
:type extended: bool
:type startAt: int
:type maxResults: int
:type state: str
:rtype: list of :class:`~jira.resources.Sprint`
:return: (content depends on API version, but always contains id, name, state, startDate and endDate)
When old GreenHopper private API is used, paging is not enabled,
and `startAt`, `maxResults` and `state` parameters are ignored.
"""
params = {}
if state:
params['state'] = state
if self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % board_id,
base=self.AGILE_BASE_URL)
if params:
warnings.warn('Old private GreenHopper API is used, parameters %s will be ignored.' % params, Warning)
if extended:
sprints = [Sprint(self._options, self._session, self.sprint_info(None, raw_sprints_json['id']))
for raw_sprints_json in r_json['sprints']]
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return ResultList(sprints, 0, len(sprints), len(sprints), True)
else:
return self._fetch_pages(Sprint, 'values', 'board/%s/sprint' % board_id, startAt, maxResults, params,
self.AGILE_BASE_URL)
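# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client using the public Agile REST path:
#
#     board = jira.boards(type='scrum', name='My board')[0]
#     active = jira.sprints(board.id, state='active')
#     for sprint in active:
#         print(sprint.id, sprint.name, sprint.state)
#
# With the old private GreenHopper path, paging and the state filter are ignored, as noted
# in the docstrings above.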
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise Exception("Duplicate sprint name: %s" % s.name)
return sprints
def update_sprint(self, id, name=None, startDate=None, endDate=None, state=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
payload['endDate'] = endDate
if state:
if self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
raise NotImplementedError('Public JIRA API does not support state update')
payload['state'] = state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def incompletedIssuesEstimateSum(self, board_id, sprint_id):
"""Return the total incompleted points this sprint."""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['incompletedIssuesEstimateSum']['value']
def removed_issues(self, board_id, sprint_id):
"""Return the completed issues for the sprint."""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['puntedIssues']]
return issues
def removedIssuesEstimateSum(self, board_id, sprint_id):
"""Return the total incompleted points this sprint."""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['puntedIssuesEstimateSum']['value']
# TODO(ssbarnea): remove sprint_info() method, the sprint() method suits the convention better
def sprint_info(self, board_id, sprint_id):
"""Return the information about a sprint.
:param board_id: the board the sprint belongs to. Deprecated and ignored.
:param sprint_id: the ID of the sprint to retrieve
"""
sprint = Sprint(self._options, self._session)
sprint.find(sprint_id)
return sprint.raw
def sprint(self, id):
"""Return the information about a sprint.
:param id: the ID of the sprint to retrieve
:type id: int
:rtype: :class:`~jira.resources.Sprint`
"""
sprint = Sprint(self._options, self._session)
sprint.find(id)
return sprint
# TODO(ssbarnea): remove this as we do have Board.delete()
def delete_board(self, id):
"""Delete an agile board."""
board = Board(self._options, self._session, raw={'id': id})
board.delete()
def create_board(self, name, project_ids, preset="scrum",
location_type='user', location_id=None):
"""Create a new board for the ``project_ids``.
:param name: name of the board
:type name: str
:param project_ids: the projects to create the board in
:type project_ids: str
:param preset: What preset to use for this board. (Default: "scrum")
:type preset: 'kanban', 'scrum', 'diy'
:param location_type: the location type. Available in cloud. (Default: "user")
:type location_type: 'user', 'project'
:param location_id: the id of project that the board should be
located under. Omit this for a 'user' location_type. Available in cloud.
:type location_id: Optional[str]
:return: The newly created board
:rtype: Board
"""
if self._options['agile_rest_path'] != GreenHopperResource.GREENHOPPER_REST_PATH:
raise NotImplementedError('JIRA Agile Public API does not support this request')
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
if location_id is not None:
location_id = self.project(location_id).id
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
if self.deploymentType == 'Cloud':
payload['locationType'] = location_type
payload['locationId'] = location_id
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""Create a new sprint for the ``board_id``.
:param name: Name of the sprint
:type name: str
:param board_id: Which board the sprint should be assigned.
:type board_id: int
:param startDate: Start date for the sprint.
:type startDate: Optional[Any]
:param endDate: End date for the sprint.
:type endDate: Optional[Any]
:return: The newly created Sprint
:rtype: Sprint
"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
if self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
url = self._get_url('sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
else:
url = self._get_url('sprint', base=self.AGILE_BASE_URL)
payload['originBoardId'] = board_id
r = self._session.post(url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
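# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client and an existing board id:
#
#     sprint = jira.create_sprint('Sprint 42', board_id=123,
#                                 startDate='2020-02-03T09:00:00.000+00:00',
#                                 endDate='2020-02-17T17:00:00.000+00:00')
#
# The timestamp format shown is an assumption based on JIRA Agile's usual ISO-8601 dates;
# both dates may be omitted and set later with update_sprint().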
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""Add the issues in ``issue_keys`` to the ``sprint_id``.
The sprint must be started but not completed.
If a sprint was completed, you would also have to edit the history of the
issue so that it appears to have been added to the sprint before it was completed,
preferably before it started. A completed sprint's issues also all have
a resolution set before the completion date.
If a sprint was not started, you would have to edit the marker and copy the
rank of each issue too.
:param sprint_id: the sprint to add issues to
:type sprint_id: int
:param issue_keys: the issues to add to the sprint
:type issue_keys: List[str]
:rtype: Response
"""
if self._options['agile_rest_path'] == GreenHopperResource.AGILE_BASE_REST_PATH:
url = self._get_url('sprint/%s/issue' % sprint_id, base=self.AGILE_BASE_URL)
payload = {'issues': issue_keys}
try:
self._session.post(url, data=json.dumps(payload))
except JIRAError as e:
if e.status_code == 404:
warnings.warn('Status code 404 may mean that an outdated JIRA Agile version is installed.'
' At least version 6.7.10 is required.')
raise
elif self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
# In old, private API the function does not exist anymore and we need to use
# issue.update() to perform this operation
# Workaround based on https://answers.atlassian.com/questions/277651/jira-agile-rest-api-example
sprint_field_id = self._get_sprint_field_id()
data = {'idOrKeys': issue_keys, 'customFieldId': sprint_field_id,
'sprintId': sprint_id, 'addToBacklog': False}
url = self._get_url('sprint/rank', base=self.AGILE_BASE_URL)
return self._session.put(url, data=json.dumps(data))
else:
raise NotImplementedError('No API for adding issues to sprint for agile_rest_path="%s"' %
self._options['agile_rest_path'])
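# [Editor's note] Hypothetical usage sketch (not part of the original source), assuming an
# authenticated `jira = JIRA(...)` client, an active sprint id, and existing issue keys:
#
#     jira.add_issues_to_sprint(742, ['PROJ-101', 'PROJ-102'])
#
# On the public Agile API this POSTs to sprint/<id>/issue; on the old GreenHopper path it
# falls back to the sprint/rank workaround described in the comments above.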
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: The ID for the epic where issues should be added.
:type epic_id: int
:param issue_keys: The issues to add to the epic
:type issue_keys: str
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics. (Default: True)
:type ignore_epics: bool
"""
if self._options['agile_rest_path'] != GreenHopperResource.GREENHOPPER_REST_PATH:
# TODO(ssbarnea): simulate functionality using issue.update()?
raise NotImplementedError('JIRA Agile Public API does not support this request')
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
return self._session.put(
url, data=json.dumps(data))
# TODO(ssbarnea): Both GreenHopper and new JIRA Agile API support moving more than one issue.
def rank(self, issue, next_issue):
"""Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank':
if field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-lexo-rank":
self._rank = field['schema']['customId']
break
elif field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
# Obsolete since JIRA v6.3.13.1
self._rank = field['schema']['customId']
if self._options['agile_rest_path'] == GreenHopperResource.AGILE_BASE_REST_PATH:
url = self._get_url('issue/rank', base=self.AGILE_BASE_URL)
payload = {'issues': [issue], 'rankBeforeIssue': next_issue, 'rankCustomFieldId': self._rank}
try:
return self._session.put(url, data=json.dumps(payload))
except JIRAError as e:
if e.status_code == 404:
warnings.warn('Status code 404 may mean that an outdated JIRA Agile version is installed.'
' At least version 6.7.10 is required.')
raise
elif self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
return self._session.put(url, data=json.dumps(data))
else:
raise NotImplementedError('No API for ranking issues for agile_rest_path="%s"' %
self._options['agile_rest_path'])
def move_to_backlog(self, issue_keys):
"""Move issues in ``issue_keys`` to the backlog, removing them from all sprints that have not been completed.
:param issue_keys: the issues to move to the backlog
:type issue_keys: str
:raises JIRAError: If moving issues to backlog fails
"""
if self._options['agile_rest_path'] == GreenHopperResource.AGILE_BASE_REST_PATH:
url = self._get_url('backlog/issue', base=self.AGILE_BASE_URL)
payload = {'issues': issue_keys}
try:
self._session.post(url, data=json.dumps(payload))
except JIRAError as e:
if e.status_code == 404:
warnings.warn('Status code 404 may mean that an outdated JIRA Agile version is installed.'
' At least version 6.7.10 is required.')
raise
elif self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH:
# In old, private API the function does not exist anymore and we need to use
# issue.update() to perform this operation
# Workaround based on https://answers.atlassian.com/questions/277651/jira-agile-rest-api-example
sprint_field_id = self._get_sprint_field_id()
data = {'idOrKeys': issue_keys, 'customFieldId': sprint_field_id,
'addToBacklog': True}
url = self._get_url('sprint/rank', base=self.AGILE_BASE_URL)
return self._session.put(url, data=json.dumps(data))
else:
raise NotImplementedError('No API for moving issues to backlog for agile_rest_path="%s"' %
self._options['agile_rest_path'])
class GreenHopper(JIRA):
def __init__(self, options=None, basic_auth=None, oauth=None, async_=None):
warnings.warn(
"GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
JIRA.__init__(
self, options=options, basic_auth=basic_auth, oauth=oauth, async_=async_)
| 40.341215 | 154 | 0.606776 |
| 4a4fad0646345b84c6233227e7a2b39a98952ac3 | 9,024 | py | Python | mosaic/infra/modeling.py | Reflorescit/InferTextGen | 277626fb9e34088a76917b899651ea68a5952e35 | ["Apache-2.0"] | null | null | null | mosaic/infra/modeling.py | Reflorescit/InferTextGen | 277626fb9e34088a76917b899651ea68a5952e35 | ["Apache-2.0"] | null | null | null | mosaic/infra/modeling.py | Reflorescit/InferTextGen | 277626fb9e34088a76917b899651ea68a5952e35 | ["Apache-2.0"] | null | null | null |
# Importing stock libraries
import numpy as np
import pandas as pd
import torch
from torch.nn import parallel
import torch.nn.functional as F
from torch import nn
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import os
import pdb
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# os (imported above) provides env variables via Beaker
# WandB – Import the wandb library
import wandb
import logging
from tqdm import tqdm
logger = logging.getLogger("modeling")
from mosaic.infra.logging import log_eval
def train(epoch, tokenizer, model, device, loader, optimizer, val_loader=None, model_class="t5",
save_dir="models", accum_step=1, save=True):
model.train()
batch_count = len(loader)
for iteration, data in tqdm(enumerate(loader, 0), total=len(loader)):
y = data['target_ids'].to(device, dtype=torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
# logger.info('Outside: input size {}'.format(ids.size()))
if model_class == "t5":
outputs = model(input_ids=ids, attention_mask=mask, decoder_input_ids=y_ids,
lm_labels=lm_labels)
else:
outputs = model(input_ids=ids, attention_mask=mask, labels=ids)
loss = outputs[0]
# logger.info('Outside: loss size {}'.format(loss.size()))
# if using DataParallel, loss is a per-device vector and should be averaged
if bool(loss.shape):
loss = torch.mean(loss)
# scale the loss for gradient accumulation
loss /= accum_step
loss.backward()
if (iteration+1) % accum_step == 0:
optimizer.step()
optimizer.zero_grad()
if iteration % 100 == 0:
wandb.log({"Training Loss": loss.item(), "Epoch": epoch,
"Batches left": batch_count - iteration})
batches_left = batch_count - iteration
logger.info(
f'\nEpoch: {epoch}, Iteration: {iteration}, Loss: {loss.item()}, Batches left: {batches_left}')
# if iteration % 500 == 0:
# logger.info(f'\nEpoch: {epoch}, Loss: {loss.item()}, BatchesLeft: {batches_left}')
# if save and iteration % 10000 == 0:
# save_path = os.path.join(save_dir, "iter_{}_model".format(iteration))
# if not os.path.exists(save_path):
# os.makedirs(save_path)
# save_model(model, save_path)
# tokenizer.save_pretrained(save_path)
# if iteration % 1000 == 0 and val_loader != None:
# # log_eval(epoch, tokenizer, model, device, val_loader, model_class=model_class)
# validate(epoch, tokenizer, model, device,val_loader)
# model.train()
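

# Illustrative driver sketch (not from the original project): how train() might
# be invoked with gradient accumulation. The dataset, batch size, learning rate
# and accumulation step below are placeholder values, and wandb.init() is
# assumed to have been called beforehand because train() logs to wandb.
def _example_train_loop(model, tokenizer, train_dataset, device):
    loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    for epoch in range(3):
        # accum_step=4 applies gradients every 4 mini-batches,
        # emulating an effective batch size of 32.
        train(epoch, tokenizer, model, device, loader, optimizer,
              model_class="t5", accum_step=4, save=False)
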
def validate(epoch, tokenizer, model, device, loader):
model.eval()
predictions = []
actuals = []
sources = []
logger.info(f"val set size: {len(loader)}")
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
generated_ids = model.generate(
input_ids=ids,
attention_mask=mask,
do_sample=True,
# max_length=int(os.environ['OUT_LEN']),
max_length=len(ids[0]) + 10,
num_beams=5,
top_k=50,
top_p=0.95
)
preds = [
tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for
g in generated_ids]
target = [
tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True) for
t in y]
source = [
tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) for
s in ids]
# if _ % 20 == 0:
# logger.info(f'source: {source} \n target: {target} \n pred: {preds}')
if _ % 100 == 0:
logger.info(f'Completed {_}')
sources.extend(source)
predictions.extend(preds)
actuals.extend(target)
return sources, predictions, actuals
def beam_generations(tokenizer, model, device, loader, top_k=40, out_len=34):
# This method assumes batch size of 1
model.eval()
if isinstance(model, nn.DataParallel):
para_obj = model
model = para_obj.module
predictions = []
actuals = []
sources = []
records = []
with torch.no_grad():
for _, data in tqdm(enumerate(loader, 0), total=len(loader)):
# pdb.set_trace()
#y = data['target_ids'].to(device, dtype=torch.long)
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
generated_ids = model.generate(
input_ids=ids,
attention_mask=mask,
temperature=1.0,
do_sample=False,
max_length=len(ids[0])+10,
top_p=0.9,
top_k=top_k,
repetition_penalty=1.0,
num_return_sequences=10 if top_k > 1 else 1,
num_beams=10
)
preds = [tokenizer.decode(g, clean_up_tokenization_spaces=True) for g in generated_ids]
# try:
# target = [tokenizer.decode(t, clean_up_tokenization_spaces=True) for t in y]
# except:
# target = ['']
# source = [tokenizer.decode(s, clean_up_tokenization_spaces=True) for s in ids]
# records.append({
# 'source': source[0],
# 'target': target[0],
# 'generations': preds
# })
records.append({'generations': preds})
if _ % 10 == 0:
logger.info(records[-1])
# if _ % 100 == 0:
# logger.info(f'Completed {_}')
return records
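

# Illustrative usage sketch (not from the original project). beam_generations()
# assumes a batch size of 1, so the DataLoader below is built accordingly; the
# eval_dataset variable is a placeholder.
def _example_beam_generations(model, tokenizer, eval_dataset, device):
    loader = DataLoader(eval_dataset, batch_size=1, shuffle=False)
    records = beam_generations(tokenizer, model, device, loader, top_k=1)
    # Each record holds the decoded generations for one source example.
    return records
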
#
def save_model(model, path):
if not os.path.exists(path):
os.makedirs(path)
if isinstance(model, nn.DataParallel):
model.module.save_pretrained(path)
else:
model.save_pretrained(path)
logger.info("save model to {}".format(path))
# def batch_greedy_generate(tokenizer, model, dataloader, device, max_num_tokens_to_produce=20):
#
# model.eval()
# with torch.no_grad():
# for _, data in enumerate(dataloader, 0):
# input_ids = data['source_ids'].to(device, dtype = torch.long)
# attn_mask = data['source_mask'].to(device, dtype = torch.long)
#
# pad_token_id = tokenizer.pad_token_id
# eos_token_id = tokenizer.eos_token_id
# eos_not_in_sents = torch.ones(input_ids.shape[0]).long()
#
# last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1
#
# start_idx = inp_idx = (last_non_masked_idx).view(-1, 1).repeat(1, tokenizer.vocab_size).unsqueeze(1)
# past = None
# seq_len = input_ids.size(1)
# position_ids = torch.tensor([list(range(seq_len)) for i in range(input_ids.shape[0])])
# for i, position_ids_slice in enumerate(position_ids):
# position_ids_slice[last_non_masked_idx[i]:] = position_ids_slice[last_non_masked_idx[i]]
#
# for step in range(max_num_tokens_to_produce):
# outputs = model(input_ids, attention_mask=attn_mask, position_ids=position_ids)
#
# if step == 0:
# next_token_logits = outputs[0].gather(1, start_idx).squeeze(1)
# else:
# next_token_logits = outputs[0][:, -1, :]
#
# next_tokens = torch.argmax(next_token_logits, dim=-1)
#
# # this updates which sentences have not seen an <EOS> token so far
# # if one <EOS> token was seen the sentence is finished
# eos_not_in_sents.mul_(next_tokens.ne(eos_token_id).long())
#
# # either append a padding token here if <EOS> has been seen or append next token
# tokens_to_add = next_tokens * (eos_not_in_sents) + pad_token_id * (1 - eos_not_in_sents)
#
# # Update input_ids, attn_mask and position_ids
# input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
# attn_mask = torch.cat([attn_mask, torch.ones((attn_mask.shape[0], 1)).long()], dim=1)
# position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
#
| 38.729614
| 114
| 0.580563
|
f9795d912f19f4f72e27ba7d4b59cdd860cde90b
| 3,451
|
py
|
Python
|
mys/pygments/pygments/lexers/solidity.py
|
eerimoq/sython
|
90937bf44b798b9c1ae0d18e31e11e95967b46c6
|
[
"MIT"
] | 83
|
2020-08-18T18:48:46.000Z
|
2021-01-01T17:00:45.000Z
|
mys/pygments/pygments/lexers/solidity.py
|
eerimoq/sython
|
90937bf44b798b9c1ae0d18e31e11e95967b46c6
|
[
"MIT"
] | 31
|
2021-01-05T00:32:36.000Z
|
2022-02-23T13:34:33.000Z
|
mys/pygments/pygments/lexers/solidity.py
|
eerimoq/sython
|
90937bf44b798b9c1ae0d18e31e11e95967b46c6
|
[
"MIT"
] | 7
|
2021-01-03T11:53:03.000Z
|
2022-02-22T17:49:42.000Z
|
"""
pygments.lexers.solidity
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Solidity.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.lexer import bygroups
from pygments.lexer import include
from pygments.lexer import words
from pygments.token import Comment
from pygments.token import Keyword
from pygments.token import Name
from pygments.token import Number
from pygments.token import Operator
from pygments.token import Punctuation
from pygments.token import String
from pygments.token import Text
from pygments.token import Whitespace
__all__ = ['SolidityLexer']
class SolidityLexer(RegexLexer):
"""
For Solidity source code.
.. versionadded:: 2.5
"""
name = 'Solidity'
aliases = ['solidity']
filenames = ['*.sol']
mimetypes = []
flags = re.MULTILINE | re.UNICODE
datatype = (
r'\b(address|bool|(?:(?:bytes|hash|int|string|uint)(?:8|16|24|32|40|48|56|64'
r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208'
r'|216|224|232|240|248|256)?))\b'
)
tokens = {
'root': [
include('whitespace'),
include('comments'),
(r'\bpragma\s+solidity\b', Keyword, 'pragma'),
(r'\b(contract)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword, Whitespace, Name.Entity)),
(datatype + r'(\s+)((?:external|public|internal|private)\s+)?' +
r'([a-zA-Z_]\w*)',
bygroups(Keyword.Type, Whitespace, Keyword, Name.Variable)),
(r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Type, Whitespace, Name.Variable)),
(r'\b(msg|block|tx)\.([A-Za-z_][a-zA-Z0-9_]*)\b', Keyword),
(words((
'block', 'break', 'constant', 'constructor', 'continue',
'contract', 'do', 'else', 'external', 'false', 'for',
'function', 'if', 'import', 'inherited', 'internal', 'is',
'library', 'mapping', 'memory', 'modifier', 'msg', 'new',
'payable', 'private', 'public', 'require', 'return',
'returns', 'struct', 'suicide', 'throw', 'this', 'true',
'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin),
(datatype, Keyword.Type),
include('constants'),
(r'[a-zA-Z_]\w*', Text),
(r'[!<=>+*/-]', Operator),
(r'[.;:{}(),\[\]]', Punctuation)
],
'comments': [
(r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
(r'/(\\\n)?[*][\w\W]*', Comment.Multiline)
],
'constants': [
(r'("(\\"|.)*?")', String.Double),
(r"('(\\'|.)*?')", String.Single),
(r'\b0[xX][0-9a-fA-F]+\b', Number.Hex),
(r'\b\d+\b', Number.Decimal),
],
'pragma': [
include('whitespace'),
include('comments'),
(r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)',
bygroups(Operator, Whitespace, Keyword)),
(r';', Punctuation, '#pop')
],
'whitespace': [
(r'\s+', Whitespace),
(r'\n', Whitespace)
]
}
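

# Illustrative usage sketch (not part of the lexer module): highlighting a
# small Solidity snippet with this lexer. The snippet itself is a placeholder.
def _example_highlight_solidity():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = "contract Counter { uint256 public count; }"
    return highlight(code, SolidityLexer(), TerminalFormatter())
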
| 33.833333
| 85
| 0.50681
|
7aef678d16facd97dd1c5674d2ae98c700f51d80
| 3,985
|
py
|
Python
|
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/transform.py
|
gingeard/airbyte
|
cf45cc6be27257abc832432d9aa0d32742a96d19
|
[
"MIT"
] | null | null | null |
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/transform.py
|
gingeard/airbyte
|
cf45cc6be27257abc832432d9aa0d32742a96d19
|
[
"MIT"
] | 1
|
2021-04-19T17:27:34.000Z
|
2021-04-19T17:30:34.000Z
|
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/transform.py
|
gingeard/airbyte
|
cf45cc6be27257abc832432d9aa0d32742a96d19
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import os
from typing import Any, Dict
import yaml
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
class TransformCatalog:
"""
To run this transformation:
```
python3 main_dev_transform_catalog.py \
--integration-type <postgres|bigquery|redshift|snowflake>
--profile-config-dir . \
--catalog integration_tests/catalog.json \
--out dir \
--json-column json_blob
```
"""
config: dict = {}
def __init__(self):
self.config = {}
def run(self, args) -> None:
self.parse(args)
self.process_catalog()
def parse(self, args) -> None:
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--integration-type", type=str, required=True, help="type of integration dialect to use")
parser.add_argument("--profile-config-dir", type=str, required=True, help="path to directory containing DBT profiles.yml")
parser.add_argument("--catalog", nargs="+", type=str, required=True, help="path to Catalog (JSON Schema) file")
parser.add_argument("--out", type=str, required=True, help="path to output generated DBT Models to")
parser.add_argument("--json-column", type=str, required=False, help="name of the column containing the json blob")
parsed_args = parser.parse_args(args)
profiles_yml = read_profiles_yml(parsed_args.profile_config_dir)
self.config = {
"integration_type": parsed_args.integration_type,
"schema": extract_schema(profiles_yml),
"catalog": parsed_args.catalog,
"output_path": parsed_args.out,
"json_column": parsed_args.json_column,
}
def process_catalog(self) -> None:
destination_type = DestinationType.from_string(self.config["integration_type"])
schema = self.config["schema"]
output = self.config["output_path"]
json_col = self.config["json_column"]
processor = CatalogProcessor(output_directory=output, destination_type=destination_type)
for catalog_file in self.config["catalog"]:
print(f"Processing {catalog_file}...")
processor.process(catalog_file=catalog_file, json_column_name=json_col, target_schema=schema)
def read_profiles_yml(profile_dir: str) -> Any:
with open(os.path.join(profile_dir, "profiles.yml"), "r") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
obj = config["normalize"]["outputs"]["prod"]
return obj
def extract_schema(profiles_yml: Dict) -> str:
if "dataset" in profiles_yml:
return str(profiles_yml["dataset"])
elif "schema" in profiles_yml:
return str(profiles_yml["schema"])
else:
raise KeyError("No Dataset/Schema defined in profiles.yml")
def main(args=None):
TransformCatalog().run(args)
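

# Illustrative invocation sketch (not part of the original module): running the
# catalog transformation programmatically with the same flags the class
# docstring describes. All paths and the destination type are placeholders.
def _example_run_transform():
    main([
        "--integration-type", "postgres",
        "--profile-config-dir", ".",
        "--catalog", "integration_tests/catalog.json",
        "--out", "generated_models",
        "--json-column", "json_blob",
    ])
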
| 39.068627
| 130
| 0.716186
|
86eb811fbe429b3e76bff4a31eb44ca1b9b08e78
| 1,508
|
py
|
Python
|
scipy/io/matlab/mio5_params.py
|
AndLogiu/scipy
|
3f05efb9498e29f4e735aa7e0220139a8d8c8773
|
[
"BSD-3-Clause"
] | 1
|
2021-12-12T09:35:08.000Z
|
2021-12-12T09:35:08.000Z
|
scipy/io/matlab/mio5_params.py
|
caos21/scipy
|
3f05efb9498e29f4e735aa7e0220139a8d8c8773
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/io/matlab/mio5_params.py
|
caos21/scipy
|
3f05efb9498e29f4e735aa7e0220139a8d8c8773
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio5_params
__all__ = [ # noqa: F822
'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque',
'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template',
'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template',
'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8',
'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8',
'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS',
'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS',
'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS',
'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS',
'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS',
'mxUINT64_CLASS', 'mxUINT8_CLASS'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.io.matlab.mio5_params is deprecated and has no attribute "
f"{name}. Try looking in scipy.io.matlab instead.")
warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
"the `scipy.io.matlab.mio5_params` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_mio5_params, name)
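

# Illustrative sketch (not part of the shim): accessing a name through this
# deprecated namespace still resolves to the real object but emits a
# DeprecationWarning, which the snippet below captures.
def _example_deprecation_warning():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from scipy.io.matlab import mio5_params as _deprecated_ns
        _ = _deprecated_ns.mat_struct  # triggers __getattr__ above
    return [w.category for w in caught]  # expected to include DeprecationWarning
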
| 39.684211
| 79
| 0.688329
|
8d0ed513d52acefa441eba6a7c0c9fe45aae0061
| 1,475
|
py
|
Python
|
.history/Test_20190122142843.py
|
zhenglitao1/piCar
|
b8f77f28ea61159c55746b0a3474ebfc3c66b6ba
|
[
"Apache-2.0"
] | null | null | null |
.history/Test_20190122142843.py
|
zhenglitao1/piCar
|
b8f77f28ea61159c55746b0a3474ebfc3c66b6ba
|
[
"Apache-2.0"
] | null | null | null |
.history/Test_20190122142843.py
|
zhenglitao1/piCar
|
b8f77f28ea61159c55746b0a3474ebfc3c66b6ba
|
[
"Apache-2.0"
] | null | null | null |
import socket
import time
def getLocalIp():
'''Get the local ip'''
try:
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.connect(('8.8.8.8',80))
ip=s.getsockname()[0]
finally:
s.close()
return ip
def main():
'''The main thread; controls the motor'''
host=getLocalIp()
print('localhost ip :'+host)
port=5050
#Init the tcp socket
tcpServer=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
tcpServer.bind((host,port))
tcpServer.setblocking(0) # Set non-blocking mode
tcpServer.listen(5)
while True:
try:
time.sleep(0.001)
(client,addr)=tcpServer.accept()
print('accept the client!')
# oled.writeArea4(' Connect')
client.setblocking(0)
while True:
time.sleep(0.001)
cameraAction(steer,cameraActionState)
try:
data=client.recv(1024)
data=bytes.decode(data)
if(len(data)==0):
print('client is closed')
# oled.writeArea4(' Disconnect')
break
motorAction(motor,data)
cameraActionState=setCameraAction(data)
except socket.error:
continue
except KeyboardInterrupt as e:
raise e
except socket.error:
pass
print(main())
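

# Illustrative client sketch (not part of the original script): connecting to
# the server above and sending one command string. The command text is a
# placeholder; the real command format depends on motorAction(), which is
# defined elsewhere in the project.
def example_client(server_ip, command='forward'):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((server_ip, 5050))  # port matches the server's `port`
    client.send(command.encode())
    client.close()
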
| 28.365385
| 62
| 0.510508
|
ae845abf050dc4cd7d203965dddb4b2032adc9e2
| 7,304
|
py
|
Python
|
yt/frontends/sdf/io.py
|
lconaboy/yt
|
d97c3cf6d7911cd12b8337784d3232068ebc59f6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/sdf/io.py
|
lconaboy/yt
|
d97c3cf6d7911cd12b8337784d3232068ebc59f6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/sdf/io.py
|
lconaboy/yt
|
d97c3cf6d7911cd12b8337784d3232068ebc59f6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
from yt.funcs import mylog
from yt.utilities.io_handler import BaseParticleIOHandler
class IOHandlerSDF(BaseParticleIOHandler):
_dataset_type = "sdf_particles"
@property
def _handle(self):
return self.ds.sdf_container
def _read_fluid_selection(self, chunks, selector, fields, size):
raise NotImplementedError
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
data_files = set()
assert len(ptf) == 1
assert list(ptf.keys())[0] == "dark_matter"
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
assert len(data_files) == 1
for _data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):
yield "dark_matter", (
self._handle["x"],
self._handle["y"],
self._handle["z"],
), 0.0
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
data_files = set()
assert len(ptf) == 1
assert list(ptf.keys())[0] == "dark_matter"
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
assert len(data_files) == 1
for _data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):
for ptype, field_list in sorted(ptf.items()):
x = self._handle["x"]
y = self._handle["y"]
z = self._handle["z"]
mask = selector.select_points(x, y, z, 0.0)
del x, y, z
if mask is None:
continue
for field in field_list:
if field == "mass":
data = np.ones(mask.sum(), dtype="float64")
data *= self.ds.parameters["particle_mass"]
else:
data = self._handle[field][mask]
yield (ptype, field), data
def _identify_fields(self, data_file):
fields = [("dark_matter", v) for v in self._handle.keys()]
fields.append(("dark_matter", "mass"))
return fields, {}
def _count_particles(self, data_file):
pcount = self._handle["x"].size
if pcount > 1e9:
mylog.warning(
"About to load %i particles into memory. "
"You may want to consider a midx-enabled load",
pcount,
)
return {"dark_matter": pcount}
class IOHandlerHTTPSDF(IOHandlerSDF):
_dataset_type = "http_sdf_particles"
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
data_files = set()
assert len(ptf) == 1
assert list(ptf.keys())[0] == "dark_matter"
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
assert len(data_files) == 1
for _data_file in data_files:
pcount = self._handle["x"].size
yield "dark_matter", (
self._handle["x"][:pcount],
self._handle["y"][:pcount],
self._handle["z"][:pcount],
), 0.0
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
data_files = set()
assert len(ptf) == 1
assert list(ptf.keys())[0] == "dark_matter"
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
assert len(data_files) == 1
for _data_file in data_files:
pcount = self._handle["x"].size
for ptype, field_list in sorted(ptf.items()):
x = self._handle["x"][:pcount]
y = self._handle["y"][:pcount]
z = self._handle["z"][:pcount]
mask = selector.select_points(x, y, z, 0.0)
del x, y, z
if mask is None:
continue
for field in field_list:
if field == "mass":
if self.ds.field_info._mass_field is None:
pm = 1.0
if "particle_mass" in self.ds.parameters:
pm = self.ds.parameters["particle_mass"]
else:
raise RuntimeError
data = pm * np.ones(mask.sum(), dtype="float64")
else:
data = self._handle[self.ds.field_info._mass_field][:][mask]
else:
data = self._handle[field][:][mask]
yield (ptype, field), data
def _count_particles(self, data_file):
return {"dark_matter": self._handle["x"].http_array.shape}
class IOHandlerSIndexSDF(IOHandlerSDF):
_dataset_type = "midx_sdf_particles"
def _read_particle_coords(self, chunks, ptf):
dle = self.ds.domain_left_edge.in_units("code_length").d
dre = self.ds.domain_right_edge.in_units("code_length").d
for dd in self.ds.midx.iter_bbox_data(dle, dre, ["x", "y", "z"]):
yield "dark_matter", (dd["x"], dd["y"], dd["z"]), 0.0
def _read_particle_fields(self, chunks, ptf, selector):
dle = self.ds.domain_left_edge.in_units("code_length").d
dre = self.ds.domain_right_edge.in_units("code_length").d
required_fields = []
for field_list in sorted(ptf.values()):
for field in field_list:
if field == "mass":
continue
required_fields.append(field)
for dd in self.ds.midx.iter_bbox_data(dle, dre, required_fields):
for ptype, field_list in sorted(ptf.items()):
x = dd["x"]
y = dd["y"]
z = dd["z"]
mask = selector.select_points(x, y, z, 0.0)
del x, y, z
if mask is None:
continue
for field in field_list:
if field == "mass":
data = np.ones(mask.sum(), dtype="float64")
data *= self.ds.parameters["particle_mass"]
else:
data = dd[field][mask]
yield (ptype, field), data
def _count_particles(self, data_file):
dle = self.ds.domain_left_edge.in_units("code_length").d
dre = self.ds.domain_right_edge.in_units("code_length").d
pcount_estimate = self.ds.midx.get_nparticles_bbox(dle, dre)
if pcount_estimate > 1e9:
mylog.warning(
"Filtering %i particles to find total. "
"You may want to reconsider your bounding box.",
pcount_estimate,
)
pcount = 0
for dd in self.ds.midx.iter_bbox_data(dle, dre, ["x"]):
pcount += dd["x"].size
return {"dark_matter": pcount}
def _identify_fields(self, data_file):
fields = [("dark_matter", v) for v in self._handle.keys()]
fields.append(("dark_matter", "mass"))
return fields, {}
class IOHandlerSIndexHTTPSDF(IOHandlerSIndexSDF):
_dataset_type = "midx_http_sdf_particles"
| 37.84456
| 88
| 0.525876
|
87f8ee093002789dd1c76a60ab1bb919d52e1489
| 798
|
py
|
Python
|
brams_jams/song_manager/tests/testSongViewset.py
|
delainewendling/brams-jams
|
b57da0424bbaeb780f753b7648cafd34b7fc6573
|
[
"MIT"
] | null | null | null |
brams_jams/song_manager/tests/testSongViewset.py
|
delainewendling/brams-jams
|
b57da0424bbaeb780f753b7648cafd34b7fc6573
|
[
"MIT"
] | null | null | null |
brams_jams/song_manager/tests/testSongViewset.py
|
delainewendling/brams-jams
|
b57da0424bbaeb780f753b7648cafd34b7fc6573
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from model_mommy import mommy
from song_manager.models import Song
from rest_framework.test import APIClient
class TestGradeDataExport(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = mommy.make('song_manager.User', date_deleted=None)
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
def testCreateSong(self):
data = {
"name": "Test Song",
}
self.client.post("/song-manager/songs", data)
songs = Song.objects.all()
song = songs[1]
self.assertEqual(song.name, data['name'])
self.assertEqual(song.user, self.user)
self.assertIsNotNone(song.date_created)
self.assertIsNone(song.date_deleted)
| 27.517241
| 69
| 0.662907
|
a5399f46f6ba1eedc770ec86750e253867717fd4
| 9,396
|
py
|
Python
|
mrcnn/config.py
|
Andrew-M-Cox/Mask_RCNN
|
9e0249de1c5087ab267fe48e1f9ce698b4d7b4c4
|
[
"MIT"
] | null | null | null |
mrcnn/config.py
|
Andrew-M-Cox/Mask_RCNN
|
9e0249de1c5087ab267fe48e1f9ce698b4d7b4c4
|
[
"MIT"
] | null | null | null |
mrcnn/config.py
|
Andrew-M-Cox/Mask_RCNN
|
9e0249de1c5087ab267fe48e1f9ce698b4d7b4c4
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 100
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet50"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
# You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further
# up scaling. For example, if set to 2 then images are scaled up to double
# the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.002
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
| 39.64557
| 80
| 0.689229
|
38d6dea1328e6f1569171aaac959c7ca9bff439d
| 38,315
|
py
|
Python
|
python/ray/tune/ray_trial_executor.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 1
|
2022-03-07T06:40:06.000Z
|
2022-03-07T06:40:06.000Z
|
python/ray/tune/ray_trial_executor.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 29
|
2021-11-24T00:50:07.000Z
|
2022-03-19T07:11:36.000Z
|
python/ray/tune/ray_trial_executor.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import copy
import inspect
import random
from collections import deque
from enum import Enum
from functools import partial
import logging
import os
import time
import traceback
from contextlib import contextmanager
from typing import (
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
)
import ray
from ray.exceptions import GetTimeoutError, RayTaskError
from ray.tune.error import (
AbortTrialExecution,
TuneError,
TuneStartTrialError,
TuneGetNextExecutorEventError,
)
from ray.tune.logger import NoopLogger
from ray.tune.result import TRIAL_INFO, STDOUT_FILE, STDERR_FILE
from ray.tune.utils.placement_groups import PlacementGroupManager, get_tune_pg_prefix
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.trial import Trial, _TuneCheckpoint, Location, TrialInfo
from ray.tune.trial_executor import TrialExecutor
from ray.tune.utils import warn_if_slow
from ray.tune.utils.resource_updater import ResourceUpdater
from ray.util import log_once
from ray.util.annotations import DeveloperAPI
from ray.util.placement_group import remove_placement_group, PlacementGroup
logger = logging.getLogger(__name__)
DEFAULT_GET_TIMEOUT = 60.0 # seconds
class _ActorClassCache:
"""Caches actor classes.
ray.remote is a registration call. It sends the serialized object to the
key value store (redis), and will be fetched at an arbitrary worker
later. Registration does not use any Ray scheduling resources.
Later, class.remote() actually creates the remote actor. The
actor will be instantiated on some arbitrary machine,
according to the underlying Ray scheduler.
Without this cache, you would register the same serialized object
over and over again. Naturally, since redis doesn’t spill to disk,
this can easily nuke the redis instance (and basically blow up Ray).
This cache instead allows us to register once and only once.
Note that we assume there can be multiple trainables in the
system at once.
"""
def __init__(self):
self._cache = {}
def get(self, trainable_cls):
"""Gets the wrapped trainable_cls, otherwise calls ray.remote."""
runtime_env = {"env_vars": {"TUNE_ORIG_WORKING_DIR": os.getcwd()}}
if trainable_cls not in self._cache:
remote_cls = ray.remote(runtime_env=runtime_env)(trainable_cls)
self._cache[trainable_cls] = remote_cls
return self._cache[trainable_cls]
_class_cache = _ActorClassCache()
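

# Illustrative sketch (not part of the original module): repeated lookups for
# the same trainable return the same registered remote class, so serialization
# and registration happen only once. MyTrainable is a placeholder class.
def _example_actor_class_cache():
    from ray.tune import Trainable

    class MyTrainable(Trainable):
        def step(self):
            return {"done": True}

    first = _class_cache.get(MyTrainable)
    second = _class_cache.get(MyTrainable)
    assert first is second  # cached, not re-registered
    return first
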
class _LocalWrapper:
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
def post_stop_cleanup(future, pg):
"""Things to be done after a trial is stopped."""
assert isinstance(pg, PlacementGroup)
try:
# This should not be blocking as
# we are only here when triggered.
ray.get(future, timeout=0)
except GetTimeoutError:
if log_once("tune_trial_cleanup_timeout"):
logger.error(
"Timed out when trying to stop the Ray actor gracefully. "
"Consider making `stop` a faster operation."
)
except Exception:
if log_once("tune_trial_cleanup_exception"):
logger.error(
f"An exception occurred when trying to stop the Ray actor:"
f"{traceback.format_exc()}"
)
finally:
remove_placement_group(pg)
class _TrialCleanup:
"""Responsible for triggering force cleanup of remote actors,
without waiting for `Trainable.stop()` to finish.
Only instantiated when `TUNE_FORCE_TRIAL_CLEANUP_S` is set up.
"""
def __init__(self, force_cleanup):
assert force_cleanup
self._force_cleanup = force_cleanup
self._future_to_insert_time = deque()
def add(self, future):
self._future_to_insert_time.append((future, time.time()))
def get_next(self):
"""Get the next future that is eligible to be cleaned up forcibly."""
if (
len(self._future_to_insert_time) > 0
and self._future_to_insert_time[0][1] + self._force_cleanup < time.time()
):
return self._future_to_insert_time.popleft()
else:
return None
def is_empty(self):
return len(self._future_to_insert_time) == 0
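

# Illustrative sketch (not part of the original module): futures added to
# _TrialCleanup become eligible for forced cleanup once the configured number
# of seconds has elapsed. The stop_future argument is a placeholder.
def _example_trial_cleanup(stop_future):
    cleanup = _TrialCleanup(force_cleanup=1)  # 1-second deadline
    cleanup.add(stop_future)
    time.sleep(1.1)
    expired = cleanup.get_next()  # (future, insert_time) once the deadline passed
    return expired is not None and cleanup.is_empty()
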
def noop_logger_creator(config, logdir):
# Set the working dir in the remote process, for user file writes
os.makedirs(logdir, exist_ok=True)
if not ray.worker._mode() == ray.worker.LOCAL_MODE:
os.chdir(logdir)
return NoopLogger(config, logdir)
class ExecutorEventType(Enum):
"""The executor event type.
Some of the events are internal events to executor while others
are handled by runner."""
NO_RUNNING_TRIAL_TIMEOUT = 1
PG_READY = 2
TRAINING_RESULT = 3
SAVING_RESULT = 4
RESTORING_RESULT = 5
STOP_RESULT = 6 # Internally to executor only.
ERROR = 7 # This is to signal to TrialRunner that there is an error.
YIELD = 8 # Yielding back to TrialRunner's main event loop.
class ExecutorEvent:
"""A struct that describes the event to be processed by TrialRunner.
Attributes:
result: A dict with keys of "future_result" and "exception".
"future_result" is the corresponding result when future returns
successfully.
"exception" is the exception as caught during ``ray.get(future)``.
"""
KEY_FUTURE_RESULT = "future_result"
KEY_EXCEPTION = "exception"
def __init__(
self,
event_type: ExecutorEventType,
trial: Optional[Trial] = None,
result: Optional[Dict] = None,
):
self.type = event_type
self.trial = trial
self.result = result
def __repr__(self):
return f"[{self.type}] for {self.trial}"
@DeveloperAPI
class RayTrialExecutor(TrialExecutor):
"""An implementation of TrialExecutor based on Ray."""
def __init__(
self,
reuse_actors: bool = False,
result_buffer_length: Optional[int] = None,
refresh_period: Optional[float] = None,
):
super(RayTrialExecutor, self).__init__()
# future --> (type, trial/pg)
self._futures = {}
force_trial_cleanup = int(os.environ.get("TUNE_FORCE_TRIAL_CLEANUP_S", "0"))
self._get_next_event_wait = int(
os.environ.get("TUNE_GET_EXECUTOR_EVENT_WAIT_S", "5")
)
if force_trial_cleanup:
self._trial_cleanup = _TrialCleanup(force_trial_cleanup)
else:
self._trial_cleanup = None
self._resource_updater = ResourceUpdater(refresh_period)
self._has_cleaned_up_pgs = False
self._reuse_actors = reuse_actors
# The maxlen will be updated when `set_max_pending_trials()` is called
self._cached_actor_pg = deque(maxlen=1)
self._pg_manager = PlacementGroupManager(prefix=get_tune_pg_prefix())
self._staged_trials = set()
self._trial_just_finished = False
self._trial_just_finished_before = False
self.last_pg_recon = 0
self.pg_recon_interval = float(
os.environ.get("TUNE_PLACEMENT_GROUP_RECON_INTERVAL", "5")
)
self._buffer_length = result_buffer_length or int(
os.getenv("TUNE_RESULT_BUFFER_LENGTH", 1)
)
self._buffer_min_time_s = float(os.getenv("TUNE_RESULT_BUFFER_MIN_TIME_S", 0.0))
self._buffer_max_time_s = float(
os.getenv("TUNE_RESULT_BUFFER_MAX_TIME_S", 100.0)
)
def set_max_pending_trials(self, max_pending: int) -> None:
if len(self._cached_actor_pg) > 0:
logger.warning(
"Cannot update maximum number of queued actors for reuse "
"during a run."
)
else:
self._cached_actor_pg = deque(maxlen=max_pending)
self._pg_manager.set_max_staging(max_pending)
def _stage_and_update_status(self, trials: Iterable[Trial]):
"""Check and update statuses of scheduled placement groups.
Stages placement groups of all trials.
"""
if not self._has_cleaned_up_pgs:
# Clean up existing placement groups after trigger the tuning
# run step() method for the first time
self._pg_manager.cleanup_existing_pg()
self._has_cleaned_up_pgs = True
for trial in trials:
if trial.status not in (Trial.PENDING, Trial.PAUSED):
continue
if trial in self._staged_trials:
continue
if self._pg_manager.trial_in_use(trial):
continue
if not self._pg_manager.stage_trial_pg(trial):
# Break if we reached the limit of pending placement groups.
break
self._staged_trials.add(trial)
self._pg_manager.update_status()
def get_staged_trial(self):
"""Get a trial whose placement group was successfully staged.
Can also return None if no trial is available.
Returns:
Trial object or None.
"""
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_manager.has_ready(trial):
return trial
return None
def _setup_remote_runner(self, trial):
trial.init_logdir()
# We checkpoint metadata here to try mitigating logdir duplication
self._trials_to_cache.add(trial)
logger_creator = partial(noop_logger_creator, logdir=trial.logdir)
if len(self._cached_actor_pg) > 0:
assert self._reuse_actors
existing_runner, pg = self._cached_actor_pg.popleft()
logger.debug(f"Trial {trial}: Reusing cached runner " f"{existing_runner}")
trial.set_runner(existing_runner)
if pg:
self._pg_manager.assign_cached_pg(pg, trial)
if not self.reset_trial(
trial, trial.config, trial.experiment_tag, logger_creator
):
raise AbortTrialExecution(
"Trainable runner reuse requires reset_config() to be "
"implemented and return True."
)
return existing_runner
trainable_cls = trial.get_trainable_cls()
if not trainable_cls:
raise AbortTrialExecution(
f"Invalid trainable: {trial.trainable_name}. If you passed "
f"a string, make sure the trainable was registered before."
)
_actor_cls = _class_cache.get(trainable_cls)
if not self._pg_manager.has_ready(trial):
return None
full_actor_class = self._pg_manager.get_full_actor_cls(trial, _actor_cls)
# Clear the Trial's location (to be updated later on result)
# since we don't know where the remote runner is placed.
trial.set_location(Location())
logger.debug("Trial %s: Setting up new remote runner.", trial)
# Logging for trials is handled centrally by TrialRunner, so
# configure the remote runner to use a noop-logger.
trial_config = copy.deepcopy(trial.config)
trial_config[TRIAL_INFO] = TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
trial_config[STDOUT_FILE] = stdout_file
trial_config[STDERR_FILE] = stderr_file
kwargs = {
"config": trial_config,
"logger_creator": logger_creator,
}
if trial.uses_cloud_checkpointing:
# We keep these kwargs separate for backwards compatibility
# with trainables that don't provide these keyword arguments
kwargs["remote_checkpoint_dir"] = trial.remote_checkpoint_dir
kwargs["sync_function_tpl"] = trial.sync_function_tpl
# Throw a meaningful error if trainable does not use the
# new API
sig = inspect.signature(trial.get_trainable_cls())
try:
sig.bind_partial(**kwargs)
except Exception as e:
raise RuntimeError(
"Your trainable class does not accept a "
"`remote_checkpoint_dir` or `sync_function_tpl` argument "
"in its constructor, but you've passed a "
"`upload_dir` to your SyncConfig. Without accepting "
"these parameters and passing them to the base trainable "
"constructor in the init call, cloud checkpointing is "
"effectively disabled. To resolve this issue, add the "
"parameters to your trainable class constructor or "
"disable cloud checkpointing by setting `upload_dir=None`."
) from e
with self._change_working_directory(trial):
return full_actor_class.remote(**kwargs)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
if self._find_future(trial):
logging.debug(
"Trial {} already has a queued future. Skipping this "
"`train` call. This may occur if a trial has "
"been unpaused within a scheduler callback.".format(str(trial))
)
return
assert trial.status == Trial.RUNNING, trial.status
buffer_time_s = max(
self._buffer_min_time_s,
min(self._buffer_max_time_s, len(self._futures) // 10),
)
with self._change_working_directory(trial):
buffer_length = self._buffer_length
if buffer_length > 1 and trial.checkpoint_at_end:
# If a trial checkpoint can be triggered externally,
# it is not safe to buffer results.
if log_once("trial_executor_buffer_checkpoint"):
logger.warning(
"Disabling buffered training as you passed "
"`checkpoint_at_end` to `tune.run()`."
)
buffer_length = 1
if buffer_length > 1:
if trial.checkpoint_freq > 0:
buffer_length = min(buffer_length, trial.checkpoint_freq)
remote = trial.runner.train_buffered.remote(
buffer_time_s, buffer_length
)
else:
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._futures[remote] = (ExecutorEventType.TRAINING_RESULT, trial)
trial_item = self._find_future(trial)
assert len(trial_item) < 2, trial_item
def _start_trial(self, trial: Trial) -> bool:
"""Starts trial and restores last result if trial was paused.
Args:
trial: The trial to start.
Returns:
True if trial was started successfully, False otherwise.
See `RayTrialExecutor.restore` for possible errors raised.
"""
self.set_status(trial, Trial.PENDING)
runner = self._setup_remote_runner(trial)
if not runner:
return False
trial.set_runner(runner)
self.restore(trial)
self.set_status(trial, Trial.RUNNING)
if trial in self._staged_trials:
self._staged_trials.remove(trial)
if not trial.is_restoring:
self._train(trial)
return True
def _stop_trial(
self,
trial: Trial,
error: bool = False,
exc: Optional[Union[TuneError, RayTaskError]] = None,
):
"""Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error: Whether to mark this trial as terminated in error.
exc: Optional exception.
"""
self.set_status(trial, Trial.ERROR if error or exc else Trial.TERMINATED)
self._trial_just_finished = True
trial.set_location(Location())
try:
trial.write_error_log(exc=exc)
if hasattr(trial, "runner") and trial.runner:
if (
not error
and self._reuse_actors
and (
len(self._cached_actor_pg)
< (self._cached_actor_pg.maxlen or float("inf"))
)
):
logger.debug("Reusing actor for %s", trial.runner)
# Move PG into cache (disassociate from trial)
pg = self._pg_manager.cache_trial_pg(trial)
if pg:
# True if a placement group was replaced
self._cached_actor_pg.append((trial.runner, pg))
should_destroy_actor = False
else:
# False if no placement group was replaced. This should
# only be the case if there are no more trials with
# this placement group factory to run
logger.debug(
f"Could not cache actor of trial {trial} for "
"reuse, as there are no pending trials "
"requiring its resources."
)
should_destroy_actor = True
else:
should_destroy_actor = True
if should_destroy_actor:
logger.debug("Trial %s: Destroying actor.", trial)
with self._change_working_directory(trial):
future = trial.runner.stop.remote()
pg = self._pg_manager.remove_from_in_use(trial)
self._futures[future] = (ExecutorEventType.STOP_RESULT, pg)
if self._trial_cleanup: # force trial cleanup within a deadline
self._trial_cleanup.add(future)
if trial in self._staged_trials:
self._staged_trials.remove(trial)
except Exception:
logger.exception("Trial %s: Error stopping runner.", trial)
self.set_status(trial, Trial.ERROR)
finally:
trial.set_runner(None)
def start_trial(self, trial: Trial) -> bool:
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial: Trial to be started.
Returns:
True if the remote runner has been started. False if trial was
not started (e.g. because of lacking resources/pending PG).
"""
try:
return self._start_trial(trial)
except AbortTrialExecution as e:
logger.exception("Trial %s: Error starting runner, aborting!", trial)
time.sleep(2)
self._stop_trial(trial, exc=e)
return False
except Exception as e:
logger.exception("Trial %s: Unexpected error starting runner.", trial)
time.sleep(2)
if isinstance(e, TuneError):
self._stop_trial(trial, exc=e)
else:
self._stop_trial(trial, exc=TuneStartTrialError(traceback.format_exc()))
# Note that we don't return the resources, since they may
# have been lost. TODO(ujvl): is this the right thing to do?
return False
def _find_future(self, trial):
out = [rid for rid, t in self._futures.items() if t[1] is trial]
assert (
len(out) <= 1
), "Expecting one future for any given trial at any given time."
return out
def stop_trial(
self,
trial: Trial,
error: bool = False,
exc: Optional[Union[TuneError, RayTaskError]] = None,
) -> None:
prior_status = trial.status
self._stop_trial(trial, error=error or exc, exc=exc)
if prior_status == Trial.RUNNING:
logger.debug("Trial %s: Returning resources.", trial)
out = self._find_future(trial)
for result_id in out:
self._futures.pop(result_id)
def continue_training(self, trial: Trial) -> None:
"""Continues the training of this trial."""
self._train(trial)
def reset_trial(
self,
trial: Trial,
new_config: Dict,
new_experiment_tag: str,
logger_creator: Optional[Callable[[Dict], "ray.tune.Logger"]] = None,
) -> bool:
"""Tries to invoke `Trainable.reset()` to reset trial.
Args:
trial: Trial to be reset.
new_config: New configuration for Trial trainable.
new_experiment_tag: New experiment name for trial.
logger_creator: Function that instantiates a logger on the
actor process.
Returns:
True if `reset_config` is successful else False.
"""
trial.set_experiment_tag(new_experiment_tag)
trial.set_config(new_config)
trainable = trial.runner
# Pass magic variables
extra_config = copy.deepcopy(new_config)
extra_config[TRIAL_INFO] = TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
extra_config[STDOUT_FILE] = stdout_file
extra_config[STDERR_FILE] = stderr_file
with self._change_working_directory(trial):
with warn_if_slow("reset"):
try:
reset_val = ray.get(
trainable.reset.remote(extra_config, logger_creator),
timeout=DEFAULT_GET_TIMEOUT,
)
except GetTimeoutError:
logger.exception("Trial %s: reset timed out.", trial)
return False
return reset_val
def has_resources_for_trial(self, trial: Trial) -> bool:
"""Returns whether there are resources available for this trial.
This will return True as long as we didn't reach the maximum number
of pending trials. It will also return True if the trial placement
group is already staged.
Args:
trial: Trial object which should be scheduled.
Returns:
boolean
"""
return (
trial in self._staged_trials
or self._pg_manager.can_stage()
or self._pg_manager.has_ready(trial, update=True)
)
def debug_string(self) -> str:
"""Returns a human readable message for printing to the console."""
total_resources = self._pg_manager.occupied_resources()
return self._resource_updater.debug_string(total_resources)
def on_step_begin(self, trials: List[Trial]) -> None:
"""Before step() is called, update the available resources."""
self._resource_updater.update_avail_resources()
self._trial_just_finished_before = self._trial_just_finished
self._trial_just_finished = False
def on_step_end(self, trials: List[Trial]) -> None:
self._do_force_trial_cleanup()
if time.time() > self.last_pg_recon + self.pg_recon_interval:
# Only do this every now and then - usually the placement groups
# should not get out of sync, and calling this often is inefficient
self._pg_manager.reconcile_placement_groups(trials)
self.last_pg_recon = time.time()
self._pg_manager.cleanup()
def _do_force_trial_cleanup(self) -> None:
if self._trial_cleanup:
while True:
next_future_to_clean = self._trial_cleanup.get_next()
if not next_future_to_clean:
break
if next_future_to_clean in self._futures.keys():
_, pg = self._futures.pop(next_future_to_clean)
post_stop_cleanup(next_future_to_clean, pg)
else:
# This just means that before the deadline reaches,
# the future is already cleaned up.
pass
def force_reconcilation_on_next_step_end(self) -> None:
self.last_pg_recon = -float("inf")
def save(
self,
trial: Trial,
storage: str = _TuneCheckpoint.PERSISTENT,
result: Optional[Dict] = None,
) -> _TuneCheckpoint:
"""Saves the trial's state to a checkpoint asynchronously.
Args:
trial: The trial to be saved.
storage: Where to store the checkpoint. Defaults to
PERSISTENT.
result: The state of this trial as a dictionary to be saved.
If result is None, the trial's last result will be used.
Returns:
Checkpoint object, or None if an Exception occurs.
"""
logger.debug(f"saving trial {trial}")
result = result or trial.last_result
with self._change_working_directory(trial):
if storage == _TuneCheckpoint.MEMORY:
value = trial.runner.save_to_object.remote()
checkpoint = _TuneCheckpoint(storage, value, result)
trial.on_checkpoint(checkpoint)
else:
value = trial.runner.save.remote()
checkpoint = _TuneCheckpoint(storage, value, result)
trial.saving_to = checkpoint
self._futures[value] = (ExecutorEventType.SAVING_RESULT, trial)
return checkpoint
def restore(self, trial: Trial) -> None:
"""Restores training state from a given model checkpoint.
Args:
trial: The trial to be restored.
Raises:
RuntimeError: This error is raised if no runner is found.
AbortTrialExecution: This error is raised if the trial is
ineligible for restoration, given the Tune input arguments.
"""
checkpoint = trial.checkpoint
if checkpoint.value is None:
return
if trial.runner is None:
raise RuntimeError(
"Trial {}: Unable to restore - no runner found.".format(trial)
)
value = checkpoint.value
node_ip = checkpoint.node_ip
if checkpoint.storage == _TuneCheckpoint.MEMORY:
logger.debug("Trial %s: Attempting restore from object", trial)
# Note that we don't store the remote since in-memory checkpoints
# don't guarantee fault tolerance and don't need to be waited on.
with self._change_working_directory(trial):
trial.runner.restore_from_object.remote(value)
else:
logger.debug("Trial %s: Attempting restore from %s", trial, value)
if trial.uses_cloud_checkpointing or not trial.sync_on_checkpoint:
# If using cloud checkpointing, trial will get cp from cloud.
# If not syncing to driver, assume it has access to the cp
# on the local fs.
with self._change_working_directory(trial):
remote = trial.runner.restore.remote(value, node_ip)
elif trial.sync_on_checkpoint:
# This provides FT backwards compatibility in the
# case where no cloud checkpoints are provided.
logger.debug("Trial %s: Reading checkpoint into memory", trial)
obj = TrainableUtil.checkpoint_to_object(value)
with self._change_working_directory(trial):
remote = trial.runner.restore_from_object.remote(obj)
else:
raise AbortTrialExecution(
"Pass in `sync_on_checkpoint=True` for driver-based trial"
"restoration. Pass in an `upload_dir` for remote "
"storage-based restoration"
)
self._futures[remote] = (ExecutorEventType.RESTORING_RESULT, trial)
trial.restoring_from = checkpoint
def export_trial_if_needed(self, trial: Trial) -> Dict:
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
with self._change_working_directory(trial):
return ray.get(
trial.runner.export_model.remote(trial.export_formats),
timeout=DEFAULT_GET_TIMEOUT,
)
return {}
def has_gpus(self) -> bool:
return self._resource_updater.get_num_gpus() > 0
def cleanup(self, trials: List[Trial]) -> None:
while True:
if self._trial_cleanup and self._trial_cleanup.is_empty():
break
elif not self._trial_cleanup and len(self._futures) == 0:
break
self._do_force_trial_cleanup()
ready, _ = ray.wait(list(self._futures.keys()), timeout=0)
if not ready:
continue
event_type, trial_or_pg = self._futures.pop(ready[0])
if event_type == ExecutorEventType.STOP_RESULT:
post_stop_cleanup(ready[0], trial_or_pg)
self._pg_manager.reconcile_placement_groups(trials)
self._pg_manager.cleanup(force=True)
self._pg_manager.cleanup_existing_pg(block=True)
@contextmanager
def _change_working_directory(self, trial):
"""Context manager changing working directory to trial logdir.
Used in local mode.
For non-local mode it is no-op.
"""
if ray.worker._mode() == ray.worker.LOCAL_MODE:
old_dir = os.getcwd()
try:
os.chdir(trial.logdir)
yield
finally:
os.chdir(old_dir)
else:
yield
def get_next_executor_event(
self, live_trials: Set[Trial], next_trial_exists: bool
) -> ExecutorEvent:
"""Get the next executor event to be processed in TrialRunner.
In case there are multiple events available for handling, the next
event is determined by the following priority:
        1. If `next_trial_exists` and there are cached resources to use,
        PG_READY is emitted.
        2. If `next_trial_exists` and there are no cached resources to use,
        wait on the pg future and the randomized other futures. If multiple
        futures are ready, the pg future takes priority and is handled first.
        3. If there is no `next_trial_exists`, wait on just the randomized
        other futures.
        An example of #3 would be synchronous hyperband. Although there are pgs
        ready, the scheduler is holding back scheduling new trials since the
        whole band of trials is waiting for the slowest trial to finish. In
        this case, we prioritize handling training results to avoid a
        deadlock.
        This is a blocking wait with a timeout (specified via env var).
        The reason for the timeout is that we still want to print status
        info periodically in TrialRunner for a better user experience.
        The handling of `ExecutorEvent.STOP_RESULT` is purely internal to
        RayTrialExecutor itself. All the other future results are handled by
        TrialRunner.
        In the future we may want to do most of the handling of
        `ExecutorEvent.RESTORE_RESULT` and `SAVING_RESULT` in
        RayTrialExecutor itself and only notify TrialRunner to invoke the
        corresponding callbacks. This view is more consistent with our goal
        of making TrialRunner responsible for external-facing Trial state
        transitions, while RayTrialExecutor is responsible for internal-facing
        transitions, namely `is_saving`, `is_restoring`, etc.
Also you may notice that the boundary between RayTrialExecutor and
PlacementGroupManager right now is really blurry. This will be
improved once we move to an ActorPool abstraction.
`next_trial_exists` means that there is a trial to run - prioritize
returning PG_READY in this case.
"""
# First update status of staged placement groups
self._stage_and_update_status(live_trials)
while True:
###################################################################
# when next_trial_exists and there are cached resources
###################################################################
# There could be existing PGs from either `self._cached_actor_pg`
# or from `self._pg_manager._ready`. If so and if there is indeed
# a next trial to run, we return `PG_READY` future for trial
# runner. The next trial can then be scheduled on this PG.
if next_trial_exists:
if len(self._cached_actor_pg) > 0:
return ExecutorEvent(ExecutorEventType.PG_READY)
# TODO(xwjiang): Expose proper API when we decide to do
# ActorPool abstraction.
if any(len(r) > 0 for r in self._pg_manager._ready.values()):
return ExecutorEvent(ExecutorEventType.PG_READY)
###################################################################
# Prepare for futures to wait
###################################################################
futures_to_wait = list(self._futures.keys())
random.shuffle(futures_to_wait)
if next_trial_exists:
                # Only wait for the pg explicitly if there is a next trial to run.
                # In that case, handling PG_READY takes precedence over other
                # events, since we want to place the pending trial ASAP.
futures_to_wait = (
self._pg_manager.get_staging_future_list() + futures_to_wait
)
logger.debug(
f"get_next_executor_event before wait with futures "
f"{futures_to_wait} and "
f"next_trial_exists={next_trial_exists}"
)
ready_futures, _ = ray.wait(
futures_to_wait, num_returns=1, timeout=self._get_next_event_wait
)
###################################################################
# Dealing with no future returned case.
###################################################################
if len(ready_futures) == 0:
if len(self._futures) == 0:
                    # No running trials and the wait timed out; the cluster may
                    # have insufficient resources, making the Tune run
                    # infeasible.
# TODO: Move InsufficientResourceManager's logic
# to TrialExecutor. It is not Runner's responsibility!
return ExecutorEvent(ExecutorEventType.NO_RUNNING_TRIAL_TIMEOUT)
else:
                    # Training is simply taking a long time; yield control back
                    # to the main event loop to print progress info etc.
return ExecutorEvent(ExecutorEventType.YIELD)
###################################################################
# If there is future returned.
###################################################################
assert len(ready_futures) == 1
ready_future = ready_futures[0]
###################################################################
# If it is a PG_READY event.
###################################################################
if ready_future not in self._futures.keys():
self._pg_manager.handle_ready_future(ready_future)
return ExecutorEvent(ExecutorEventType.PG_READY)
###################################################################
# non PG_READY event
###################################################################
result_type, trial_or_pg = self._futures.pop(ready_future)
if result_type == ExecutorEventType.STOP_RESULT:
pg = trial_or_pg
post_stop_cleanup(ready_future, pg)
else:
trial = trial_or_pg
assert isinstance(trial, Trial)
try:
future_result = ray.get(ready_future)
# For local mode
if isinstance(future_result, _LocalWrapper):
future_result = future_result.unwrap()
if result_type in (
ExecutorEventType.TRAINING_RESULT,
ExecutorEventType.SAVING_RESULT,
ExecutorEventType.RESTORING_RESULT,
):
logger.debug(f"Returning [{result_type}] for trial {trial}")
return ExecutorEvent(
result_type,
trial,
result={ExecutorEvent.KEY_FUTURE_RESULT: future_result},
)
else:
raise TuneError(f"Unexpected future type - [{result_type}]")
except RayTaskError as e:
return ExecutorEvent(
ExecutorEventType.ERROR,
trial,
result={ExecutorEvent.KEY_EXCEPTION: e.as_instanceof_cause()},
)
except Exception:
return ExecutorEvent(
ExecutorEventType.ERROR,
trial,
result={
ExecutorEvent.KEY_EXCEPTION: TuneGetNextExecutorEventError(
traceback.format_exc()
)
},
)
| 39.745851
| 88
| 0.589482
|
7b5d495ff2e638157c4e7e1c0ce79a8bc81eb4bf
| 2,196
|
py
|
Python
|
src/helper/csv_from_xmls.py
|
ymlsam/deepgaau-detector
|
3f6d8195b4b1857bc1317035d999e9bca226ce7c
|
[
"MIT"
] | null | null | null |
src/helper/csv_from_xmls.py
|
ymlsam/deepgaau-detector
|
3f6d8195b4b1857bc1317035d999e9bca226ce7c
|
[
"MIT"
] | null | null | null |
src/helper/csv_from_xmls.py
|
ymlsam/deepgaau-detector
|
3f6d8195b4b1857bc1317035d999e9bca226ce7c
|
[
"MIT"
] | null | null | null |
import argparse
import glob
import os
import pandas as pd
from typing import Optional
from xml.etree import ElementTree
def csv_from_df(in_dir: str, sub_dir: str, df: pd.DataFrame) -> None:
df.to_csv(os.path.join(in_dir, sub_dir, '_' + sub_dir + '.csv'), index=None)
def csv_from_xmls(in_dir: str) -> None:
for sub_dir in ['train', 'dev', 'test']:
df = df_from_xmls(in_dir, sub_dir)
if df is None:
continue
csv_from_df(in_dir, sub_dir, df)
def df_from_xmls(in_dir: str, sub_dir: str) -> Optional[pd.DataFrame]:
xml_dir = os.path.join(in_dir, sub_dir)
if not os.path.isdir(xml_dir):
        print('directory "{}" does not exist'.format(xml_dir))
return
xml_files = glob.glob(os.path.join(in_dir, sub_dir, '*.xml'))
if len(xml_files) <= 0:
        print('no xml files found in directory "{}"'.format(xml_dir))
return
rows = []
for xml_file in xml_files:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
fn = root.find('filename').text
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.findall('object'):
cls = obj.find('name').text
box = obj.find('bndbox')
xmin = int(box.find('xmin').text)
ymin = int(box.find('ymin').text)
xmax = int(box.find('xmax').text)
ymax = int(box.find('ymax').text)
rows.append((fn, w, h, cls, xmin, ymin, xmax, ymax))
return pd.DataFrame(rows, columns=['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax'])
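# The parser above expects Pascal VOC style annotation files, e.g. (illustrative example):
#   <annotation>
#     <filename>img_001.jpg</filename>
#     <size><width>640</width><height>480</height></size>
#     <object>
#       <name>cat</name>
#       <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>200</xmax><ymax>220</ymax></bndbox>
#     </object>
#   </annotation>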
def main() -> None:
# argument parser
parser = argparse.ArgumentParser(
description='consolidate xml files into a csv file, for each of the sub-datasets',
)
parser.add_argument(
'-i', '--in_dir',
help='input folder containing train/dev/test sub-folders (default to current working directory)',
type=str,
default=os.getcwd(),
)
args = parser.parse_args()
# proceed
csv_from_xmls(args.in_dir)
if __name__ == '__main__':
main()
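# Example invocation (illustrative; the data directory below is hypothetical):
#   python csv_from_xmls.py -i data
# For each of the train/dev/test sub-folders that contains .xml annotations,
# this writes a consolidated CSV next to them, e.g. data/train/_train.csv.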
| 29.28
| 111
| 0.589709
|
5688402972a155eccdda8b8757367cacd6996928
| 584
|
py
|
Python
|
lecture/migrations/0004_evaluation.py
|
ysnayal17/Virtual-Classroom
|
9467c4045c4cf0523d5a34ada2d588543698734d
|
[
"MIT"
] | 14
|
2019-03-23T23:15:21.000Z
|
2021-09-20T04:46:57.000Z
|
lecture/migrations/0004_evaluation.py
|
ysnayal17/Virtual-Classroom
|
9467c4045c4cf0523d5a34ada2d588543698734d
|
[
"MIT"
] | 2
|
2019-05-02T06:38:28.000Z
|
2019-05-02T06:40:38.000Z
|
lecture/migrations/0004_evaluation.py
|
ysnayal17/Virtual-Classroom
|
9467c4045c4cf0523d5a34ada2d588543698734d
|
[
"MIT"
] | 12
|
2019-10-25T19:57:39.000Z
|
2022-01-31T12:38:51.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2018-09-18 14:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lecture', '0003_auto_20180915_0124'),
]
operations = [
migrations.CreateModel(
name='Evaluation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.CharField(max_length=500)),
],
),
]
| 25.391304
| 114
| 0.599315
|
c55508867d07f49ce05dfe47b8eb2696397b8732
| 22,373
|
py
|
Python
|
synapse/storage/databases/main/deviceinbox.py
|
warricksothr/synapse
|
1de26b346796ec8d6b51b4395017f8107f640c47
|
[
"Apache-2.0"
] | 7
|
2020-07-03T13:51:31.000Z
|
2022-03-10T01:26:18.000Z
|
synapse/storage/databases/main/deviceinbox.py
|
warricksothr/synapse
|
1de26b346796ec8d6b51b4395017f8107f640c47
|
[
"Apache-2.0"
] | 69
|
2019-09-09T13:54:30.000Z
|
2022-03-23T10:45:15.000Z
|
synapse/storage/databases/main/deviceinbox.py
|
warricksothr/synapse
|
1de26b346796ec8d6b51b4395017f8107f640c47
|
[
"Apache-2.0"
] | 7
|
2020-04-24T17:04:40.000Z
|
2021-07-29T23:06:25.000Z
|
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional, Tuple
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.replication.tcp.streams import ToDeviceStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import DatabasePool
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.util import json_encoder
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
logger = logging.getLogger(__name__)
class DeviceInboxWorkerStore(SQLBaseStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
self._instance_name = hs.get_instance_name()
# Map of (user_id, device_id) to the last stream_id that has been
# deleted up to. This is so that we can no op deletions.
self._last_device_delete_cache = ExpiringCache(
cache_name="last_device_delete_cache",
clock=self._clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
)
if isinstance(database.engine, PostgresEngine):
self._can_write_to_device = (
self._instance_name in hs.config.worker.writers.to_device
)
self._device_inbox_id_gen = MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
stream_name="to_device",
instance_name=self._instance_name,
tables=[("device_inbox", "instance_name", "stream_id")],
sequence_name="device_inbox_sequence",
writers=hs.config.worker.writers.to_device,
)
else:
self._can_write_to_device = True
self._device_inbox_id_gen = StreamIdGenerator(
db_conn, "device_inbox", "stream_id"
)
max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
device_inbox_prefill, min_device_inbox_id = self.db_pool.get_cache_dict(
db_conn,
"device_inbox",
entity_column="user_id",
stream_column="stream_id",
max_value=max_device_inbox_id,
limit=1000,
)
self._device_inbox_stream_cache = StreamChangeCache(
"DeviceInboxStreamChangeCache",
min_device_inbox_id,
prefilled_cache=device_inbox_prefill,
)
# The federation outbox and the local device inbox uses the same
# stream_id generator.
device_outbox_prefill, min_device_outbox_id = self.db_pool.get_cache_dict(
db_conn,
"device_federation_outbox",
entity_column="destination",
stream_column="stream_id",
max_value=max_device_inbox_id,
limit=1000,
)
self._device_federation_outbox_stream_cache = StreamChangeCache(
"DeviceFederationOutboxStreamChangeCache",
min_device_outbox_id,
prefilled_cache=device_outbox_prefill,
)
def process_replication_rows(self, stream_name, instance_name, token, rows):
if stream_name == ToDeviceStream.NAME:
self._device_inbox_id_gen.advance(instance_name, token)
for row in rows:
if row.entity.startswith("@"):
self._device_inbox_stream_cache.entity_has_changed(
row.entity, token
)
else:
self._device_federation_outbox_stream_cache.entity_has_changed(
row.entity, token
)
return super().process_replication_rows(stream_name, instance_name, token, rows)
def get_to_device_stream_token(self):
return self._device_inbox_id_gen.get_current_token()
async def get_new_messages_for_device(
self,
user_id: str,
device_id: Optional[str],
last_stream_id: int,
current_stream_id: int,
limit: int = 100,
) -> Tuple[List[dict], int]:
"""
Args:
user_id: The recipient user_id.
device_id: The recipient device_id.
last_stream_id: The last stream ID checked.
            current_stream_id: The current position of the to device
                message stream.
limit: The maximum number of messages to retrieve.
Returns:
A list of messages for the device and where in the stream the messages got to.
"""
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_stream_id
)
if not has_changed:
return ([], current_stream_id)
def get_new_messages_for_device_txn(txn):
sql = (
"SELECT stream_id, message_json FROM device_inbox"
" WHERE user_id = ? AND device_id = ?"
" AND ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC"
" LIMIT ?"
)
txn.execute(
sql, (user_id, device_id, last_stream_id, current_stream_id, limit)
)
messages = []
for row in txn:
stream_pos = row[0]
messages.append(db_to_json(row[1]))
if len(messages) < limit:
stream_pos = current_stream_id
return messages, stream_pos
return await self.db_pool.runInteraction(
"get_new_messages_for_device", get_new_messages_for_device_txn
)
@trace
async def delete_messages_for_device(
self, user_id: str, device_id: Optional[str], up_to_stream_id: int
) -> int:
"""
Args:
user_id: The recipient user_id.
device_id: The recipient device_id.
up_to_stream_id: Where to delete messages up to.
Returns:
The number of messages deleted.
"""
# If we have cached the last stream id we've deleted up to, we can
# check if there is likely to be anything that needs deleting
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), None
)
set_tag("last_deleted_stream_id", last_deleted_stream_id)
if last_deleted_stream_id:
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_deleted_stream_id
)
if not has_changed:
log_kv({"message": "No changes in cache since last check"})
return 0
def delete_messages_for_device_txn(txn):
sql = (
"DELETE FROM device_inbox"
" WHERE user_id = ? AND device_id = ?"
" AND stream_id <= ?"
)
txn.execute(sql, (user_id, device_id, up_to_stream_id))
return txn.rowcount
count = await self.db_pool.runInteraction(
"delete_messages_for_device", delete_messages_for_device_txn
)
log_kv({"message": f"deleted {count} messages for device", "count": count})
# Update the cache, ensuring that we only ever increase the value
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), 0
)
self._last_device_delete_cache[(user_id, device_id)] = max(
last_deleted_stream_id, up_to_stream_id
)
return count
@trace
async def get_new_device_msgs_for_remote(
self, destination, last_stream_id, current_stream_id, limit
) -> Tuple[List[dict], int]:
"""
Args:
            destination(str): The name of the remote server.
            last_stream_id(int|long): The last position of the device message
                stream that the server sent up to.
            current_stream_id(int|long): The current position of the device
                message stream.
            limit(int): The maximum number of messages to retrieve.
Returns:
A list of messages for the device and where in the stream the messages got to.
"""
set_tag("destination", destination)
set_tag("last_stream_id", last_stream_id)
set_tag("current_stream_id", current_stream_id)
set_tag("limit", limit)
has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
destination, last_stream_id
)
if not has_changed or last_stream_id == current_stream_id:
log_kv({"message": "No new messages in stream"})
return ([], current_stream_id)
if limit <= 0:
# This can happen if we run out of room for EDUs in the transaction.
return ([], last_stream_id)
@trace
def get_new_messages_for_remote_destination_txn(txn):
sql = (
"SELECT stream_id, messages_json FROM device_federation_outbox"
" WHERE destination = ?"
" AND ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC"
" LIMIT ?"
)
txn.execute(sql, (destination, last_stream_id, current_stream_id, limit))
messages = []
for row in txn:
stream_pos = row[0]
messages.append(db_to_json(row[1]))
if len(messages) < limit:
log_kv({"message": "Set stream position to current position"})
stream_pos = current_stream_id
return messages, stream_pos
return await self.db_pool.runInteraction(
"get_new_device_msgs_for_remote",
get_new_messages_for_remote_destination_txn,
)
@trace
async def delete_device_msgs_for_remote(
self, destination: str, up_to_stream_id: int
) -> None:
"""Used to delete messages when the remote destination acknowledges
their receipt.
Args:
destination: The destination server_name
up_to_stream_id: Where to delete messages up to.
"""
def delete_messages_for_remote_destination_txn(txn):
sql = (
"DELETE FROM device_federation_outbox"
" WHERE destination = ?"
" AND stream_id <= ?"
)
txn.execute(sql, (destination, up_to_stream_id))
await self.db_pool.runInteraction(
"delete_device_msgs_for_remote", delete_messages_for_remote_destination_txn
)
async def get_all_new_device_messages(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, tuple]], int, bool]:
"""Get updates for to device replication stream.
Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.
Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exist
            between the requested tokens due to the limit.
            The token returned can be used in a subsequent call to this
            function to get further updates.
            The updates are a list of 2-tuples of stream ID and the row data.
"""
if last_id == current_id:
return [], current_id, False
def get_all_new_device_messages_txn(txn):
# We limit like this as we might have multiple rows per stream_id, and
# we want to make sure we always get all entries for any stream_id
# we return.
upper_pos = min(current_id, last_id + limit)
sql = (
"SELECT max(stream_id), user_id"
" FROM device_inbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY user_id"
)
txn.execute(sql, (last_id, upper_pos))
updates = [(row[0], row[1:]) for row in txn]
sql = (
"SELECT max(stream_id), destination"
" FROM device_federation_outbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY destination"
)
txn.execute(sql, (last_id, upper_pos))
updates.extend((row[0], row[1:]) for row in txn)
# Order by ascending stream ordering
updates.sort()
limited = False
upto_token = current_id
if len(updates) >= limit:
upto_token = updates[-1][0]
limited = True
return updates, upto_token, limited
return await self.db_pool.runInteraction(
"get_all_new_device_messages", get_all_new_device_messages_txn
)
@trace
async def add_messages_to_device_inbox(
self,
local_messages_by_user_then_device: dict,
remote_messages_by_destination: dict,
) -> int:
"""Used to send messages from this server.
Args:
            local_messages_by_user_then_device:
Dictionary of user_id to device_id to message.
remote_messages_by_destination:
Dictionary of destination server_name to the EDU JSON to send.
Returns:
The new stream_id.
"""
assert self._can_write_to_device
def add_messages_txn(txn, now_ms, stream_id):
# Add the local messages directly to the local inbox.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device
)
# Add the remote messages to the federation outbox.
# We'll send them to a remote server when we next send a
# federation transaction to that destination.
self.db_pool.simple_insert_many_txn(
txn,
table="device_federation_outbox",
values=[
{
"destination": destination,
"stream_id": stream_id,
"queued_ts": now_ms,
"messages_json": json_encoder.encode(edu),
"instance_name": self._instance_name,
}
for destination, edu in remote_messages_by_destination.items()
],
)
if remote_messages_by_destination:
issue9533_logger.debug(
"Queued outgoing to-device messages with stream_id %i for %s",
stream_id,
list(remote_messages_by_destination.keys()),
)
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self.clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
)
for user_id in local_messages_by_user_then_device.keys():
self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
for destination in remote_messages_by_destination.keys():
self._device_federation_outbox_stream_cache.entity_has_changed(
destination, stream_id
)
return self._device_inbox_id_gen.get_current_token()
async def add_messages_from_remote_to_device_inbox(
self, origin: str, message_id: str, local_messages_by_user_then_device: dict
) -> int:
assert self._can_write_to_device
def add_messages_txn(txn, now_ms, stream_id):
# Check if we've already inserted a matching message_id for that
# origin. This can happen if the origin doesn't receive our
# acknowledgement from the first time we received the message.
already_inserted = self.db_pool.simple_select_one_txn(
txn,
table="device_federation_inbox",
keyvalues={"origin": origin, "message_id": message_id},
retcols=("message_id",),
allow_none=True,
)
if already_inserted is not None:
return
# Add an entry for this message_id so that we know we've processed
# it.
self.db_pool.simple_insert_txn(
txn,
table="device_federation_inbox",
values={
"origin": origin,
"message_id": message_id,
"received_ts": now_ms,
},
)
# Add the messages to the appropriate local device inboxes so that
# they'll be sent to the devices when they next sync.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device
)
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self.clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_from_remote_to_device_inbox",
add_messages_txn,
now_ms,
stream_id,
)
for user_id in local_messages_by_user_then_device.keys():
self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
return stream_id
def _add_messages_to_local_device_inbox_txn(
self, txn, stream_id, messages_by_user_then_device
):
assert self._can_write_to_device
local_by_user_then_device = {}
for user_id, messages_by_device in messages_by_user_then_device.items():
messages_json_for_user = {}
devices = list(messages_by_device.keys())
if len(devices) == 1 and devices[0] == "*":
# Handle wildcard device_ids.
devices = self.db_pool.simple_select_onecol_txn(
txn,
table="devices",
keyvalues={"user_id": user_id},
retcol="device_id",
)
message_json = json_encoder.encode(messages_by_device["*"])
for device_id in devices:
# Add the message for all devices for this user on this
# server.
messages_json_for_user[device_id] = message_json
else:
if not devices:
continue
rows = self.db_pool.simple_select_many_txn(
txn,
table="devices",
keyvalues={"user_id": user_id},
column="device_id",
iterable=devices,
retcols=("device_id",),
)
for row in rows:
# Only insert into the local inbox if the device exists on
# this server
device_id = row["device_id"]
message_json = json_encoder.encode(messages_by_device[device_id])
messages_json_for_user[device_id] = message_json
if messages_json_for_user:
local_by_user_then_device[user_id] = messages_json_for_user
if not local_by_user_then_device:
return
self.db_pool.simple_insert_many_txn(
txn,
table="device_inbox",
values=[
{
"user_id": user_id,
"device_id": device_id,
"stream_id": stream_id,
"message_json": message_json,
"instance_name": self._instance_name,
}
for user_id, messages_by_device in local_by_user_then_device.items()
for device_id, message_json in messages_by_device.items()
],
)
issue9533_logger.debug(
"Stored to-device messages with stream_id %i for %s",
stream_id,
[
(user_id, device_id)
for (user_id, messages_by_device) in local_by_user_then_device.items()
for device_id in messages_by_device.keys()
],
)
class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_index_update(
"device_inbox_stream_index",
index_name="device_inbox_stream_id_user_id",
table="device_inbox",
columns=["stream_id", "user_id"],
)
self.db_pool.updates.register_background_update_handler(
self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox
)
async def _background_drop_index_device_inbox(self, progress, batch_size):
def reindex_txn(conn):
txn = conn.cursor()
txn.execute("DROP INDEX IF EXISTS device_inbox_stream_id")
txn.close()
await self.db_pool.runWithConnection(reindex_txn)
await self.db_pool.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
return 1
class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore):
pass
| 38.244444
| 90
| 0.596344
|