| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| guiccbr/autonomous-fuzzy-quadcopter | python/py_quad_control/controller/sparc.py | 1 | 17796 |
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# ------------------------ Imports ----------------------------------#
from __future__ import division # Force real division
import numpy as np # Numpy Arrays
import math # Square root
# ------------------------ Classes ---------------------------------#
class SparcController:
def __init__(self, control_range, ref_range, input_size, init_input, init_ref, init_y, monotonicity=1,
dc_radius_const=0.5):
"""
        Initializes a SPARC controller using the first sample.
Keyword arguments:
control_range -- Tuple of two elements with the first element representing
the minimum value of the control signal, and the second
element representing its maximum. (float, float).
ref_range -- Tuple of two elements with the first element representing
the minimum value of the reference (desired plant state) the second
element representing its maximum. (float, float).
input_size -- Size of the input x (int)
init_input -- First input value (numpy array of size input_size)
        init_ref -- First reference value (float)
        init_y -- First plant output value (float)
        monotonicity -- Sign of the relationship between control signal and plant output; a negative value flips the consequent constant (int)
dc_radius_const -- DataCloud radius constant (see DataCloud class for more details)
"""
# Set constant values
self.umin, self.umax = control_range
self.refmin, self.refmax = ref_range
self.xsize = input_size
self.radius_update_const = dc_radius_const
self.k = 1 # Initial step number
# Global density recursive values Initial
self.g_csi = np.array([0.0] * (self.xsize + 1))
self.g_b = 0.0
# - Consequents normalization constant (Changes according to the current reference curve)
self.c = abs(float(self.umax - self.umin) / (self.refmax - self.refmin))
        # - The sign of c follows the monotonicity
if monotonicity < 0:
self.c = -self.c
        # Initial consequent will be proportional to the error.
q_init = self.c * (init_ref - init_y)
# Initial input
curr_x = np.copy(init_input)
curr_z = np.append(curr_x, q_init)
# Instantiate a list of clouds
self.clouds = []
        # Initializes SPARC with the first cloud, using q_init
        # as the initial consequent.
initial_variance = np.array([0.0] * self.xsize)
self.clouds.append(DataCloud(curr_z, initial_variance, self.radius_update_const))
# Initializes array with membership degrees.
# md[i] corresponds to the degree of membership of the sample xk to the Data Cloud i
curr_md = np.array([1.0])
# Store last sample
self.prev_y = init_y
self.prev_ref = init_ref
self.prev_md = np.copy(curr_md)
self.prev_z = np.copy(curr_z)
# Update k before next iteration
self.k += 1
def update_reference_range(self, refmin, refmax):
"""
        Update the Consequent normalization constant according to a new refmin, refmax.
:param refmin: Minimum value of the current reference curve.
:param refmax: Maximum value of the current reference curve.
:return: void
"""
self.refmax = refmax
self.refmin = refmin
self.c = float(self.umax - self.umin) / (self.refmax - self.refmin)
def update(self, curr_x, curr_y, curr_ref, prev_u):
"""
Calculate the output given an input and a reference.
Keyword arguments:
curr_x -- current data sample of dimension XSIZE (numpy array of size self.xsize)
curr_y -- current plant output value (float)
curr_ref -- current reference value (float)
prev_u -- value of the input finally applied to the plant (Truncated if needed)
Returns:
u -- output respective to curr_x (float)
"""
num_clouds = len(self.clouds)
#print 'num_clouds, curr_x:', num_clouds, curr_x
# (1) Updates the consequents of all clouds
for i in range(num_clouds):
self.clouds[i].update_consequent(self.prev_md[i], self.prev_ref, curr_y,
prev_u, self.c, self.umin, self.umax)
#print 'First Cloud (focal point, consequent):', self.clouds[0].zf
        # (2) Find the Data Cloud associated with the new sample
        # First, calculate the local density of the sample relative to each cloud
relative_ld = [0.] * num_clouds
for i in range(num_clouds):
relative_ld[i] = self.clouds[i].get_local_density(curr_x)
# Second, calculate the normalized relative densities (membership degrees)
curr_md = [md / float(sum(relative_ld)) for md in relative_ld]
        # Third, find the data cloud that best describes the current sample.
curr_x_associated_cloud = np.argmax(curr_md)
# (3) Generate control signal
curr_u = 0.0
for i in range(num_clouds):
curr_u += curr_md[i] * self.clouds[i].get_consequent()
# (4) Compute Global Density
# First, concatenates x and u to form z and compute global
curr_z = np.append(curr_x, curr_u)
# Second, calculate Global Density of curr_z
curr_gd = self.get_global_density(curr_z)
# (5) Perform two tests to check if a new cloud is needed or if it needs to be updated.
# First, Calculate the global density of the focal points of every existing Data Cloud.
focal_points_gd = np.array([0.] * num_clouds)
for i in range(num_clouds):
focal_points_gd[i] = self.get_global_density(self.clouds[i].zf)
# Second, Calculate the distances from the current sample to every focal point
focal_points_distances = np.array([0.] * num_clouds)
for i in range(num_clouds):
focal_points_distances[i] = np.linalg.norm(curr_x - self.clouds[i].zf[:self.xsize])
# Third, Check if the Global Density of the current point is bigger than the Global Densities of all
# the focal points.
curr_sample_global_density_is_better = False
if curr_gd > np.max(focal_points_gd):
curr_sample_global_density_is_better = True
# Fourth, Check if the point is far enough from every data cloud.
curr_sample_is_distant_enough = True
for i in range(num_clouds):
if focal_points_distances[i] <= np.max(self.clouds[i].r) / 2.:
curr_sample_is_distant_enough = False
# Inverse Alternative to FIFTH (Check if sample satisfies one sigma condition)
# If it's satisfied, a new cloud is not created.
# if np.max(relative_ld) > 1./math.e:
# curr_sample_is_distant_enough = False
# Fifth, If a new cloud is needed, creates a new cloud
# Otherwise, adds the current point to the best matching cloud and checks
# if the focal point has to be updated
new_cloud_needed = curr_sample_global_density_is_better and curr_sample_is_distant_enough
# If both conditions are satisfied (global density is better and sample is distant enough), create a new cloud
if new_cloud_needed:
# Get variances of all clouds to get the local scatter for the new cloud.
local_scatters = np.array([[0., 0.]] * num_clouds)
for i in range(num_clouds):
local_scatters[i][0] = math.sqrt(self.clouds[i].variance[0])
local_scatters[i][1] = math.sqrt(self.clouds[i].variance[1])
new_cloud_local_scatter = np.average(local_scatters, 0)
new_cloud_variance = new_cloud_local_scatter ** 2
# Creates new cloud with focal point zk and starting variance
self.clouds.append(DataCloud(curr_z, new_cloud_variance, self.radius_update_const))
# Update Membership degree to include this new cloud!
relative_ld.append(self.clouds[num_clouds].get_local_density(curr_x))
curr_md = [float(md) / sum(relative_ld) for md in relative_ld]
        # If a new cloud is not needed, a focal point update might still be needed. If the local density of the
        # current sample relative to the associated cloud is greater than the local density of that cloud's focal
        # point relative to itself, and the global density of the current sample is greater than the global
        # density of that focal point, then update the focal point.
# TEST: Add data sample to data cloud before updating focal point
# self.clouds[curr_x_associated_cloud].add_point(curr_z)
if not new_cloud_needed:
# Local density of the sample and the focal point relative to the associated cloud:
associated_cloud_xf = self.clouds[curr_x_associated_cloud].zf[:self.xsize]
associated_cloud_xf_ld = self.clouds[curr_x_associated_cloud].get_local_density(associated_cloud_xf)
curr_x_ld = self.clouds[curr_x_associated_cloud].get_local_density(curr_x)
# Global density of the sample and the focal point of the associated cloud:
associated_cloud_zf = self.clouds[curr_x_associated_cloud].zf
associated_cloud_zf_gd = self.get_global_density(associated_cloud_zf)
if curr_x_ld > associated_cloud_xf_ld and curr_gd > associated_cloud_zf_gd:
self.clouds[curr_x_associated_cloud].update_focal_point(curr_z)
# Add data sample to data cloud after updating focal point
self.clouds[curr_x_associated_cloud].add_point(curr_z)
# Update Global Density values g_csi and g_b
# Update global density recursive values
prev_gcsi = self.g_csi
prev_gb = self.g_b
self.g_csi = prev_gcsi + self.prev_z
self.g_b = prev_gb + np.dot(self.prev_z, self.prev_z)
# Store last sample
self.prev_md = np.copy(curr_md)
self.prev_ref = curr_ref
self.prev_y = curr_y
self.prev_z = np.copy(curr_z)
# Update k before next iteration
self.k += 1
# Return output u related to input curr_x
return curr_u
def get_global_density(self, z):
"""
        Recursively calculates the Global Density of point z.
        Keyword arguments:
        z -- sample whose corresponding global density will be calculated.
"""
prev_z = self.prev_z
prev_gcsi = self.g_csi
prev_gb = self.g_b
gcsi_k = prev_gcsi + prev_z
ga_k = np.dot(z, gcsi_k)
gb_k = prev_gb + np.dot(prev_z, prev_z)
gd = float(self.k - 1) / ((self.k - 1) * (np.dot(z, z) + 1) - 2. * ga_k + gb_k)
return gd
class DataCloud:
"""
Class that represents a data cloud.
It stores the following information in the form of instance variables:
zf -- Focal point, composed by xf (data sample) and q (consequent)
    csi, betha -- parameters for recursive calculation of the local density
r -- array of radii, one for each dimension of X.
    sigma_sq -- parameter for recursive calculation of the radii (variance)
m -- number of points added so far
z -- Last point added.
"""
def __init__(self, z, initial_variance, radius_update_const=0.5):
"""
Initializes a DataCloud with one point z.
Extracts x and u, setting u as the consequent q.
Keyword arguments:
        z -- first point of the cloud, composed of x (data sample) and u (control signal), used as the initial focal point
initial_variance -- array containing the variance starting value for the new DataCloud
radius_update_const -- Radius constant, usually 0.5
"""
# Set radius update constant
self.radius_update_const = radius_update_const
# Gets plant input (x) and control signal (u)
# from z where z = [x', u']', setting them
# as focal point (xf) and consequent (q) respectively.
self.zf = np.copy(z)
self.xsize = len(z) - 1
# Local density calculation values
self.csi = np.array([0.0] * self.xsize)
self.betha = 0.0
# Data Cloud Size
self.m = 1
# Data Cloud Radius
        # Each data cloud has X_SIZE radii, one for each dimension of x.
# By definition the initial radius r1 is 1 for each dimension.
self.r = np.array([1.0] * self.xsize)
# Local Scatter square (sigma_square), has to be stored for recursive calculation of
# the radius. For each dimension of x, there's a sigma associated to it.
# By definition the initial sigma sigma1 is 1 if not provided
self.variance = np.copy(initial_variance)
# Save previous added point for next calculations
self.prev_z = np.copy(z)
def update_focal_point(self, z):
"""
        Update focal point. Only updates self.zf; it does not increment the size of the data cloud,
        nor does it update the radius or variance. Usually add_point is called right after.
Keyword arguments:
z -- datacloud point composed by x (data sample) and u (control signal)
"""
self.zf = z
def __update_radius__(self):
"""
Update radius of the Data Cloud recursively.
It needs to be called after a new point is added to the Cloud.
"""
p = self.radius_update_const
for i in range(0, len(self.r)):
self.r[i] = p * self.r[i] + (1 - p) * math.sqrt(self.variance[i])
def __update_variance_and_centroid__(self, curr_z):
"""
Update the local scatter square of the Data Cloud recursively.
The local scatter ( sigma ) is needed to update the radius.
Keyword arguments:
curr_z -- Last added sample
"""
# Extract X and centroid
x = curr_z[:self.xsize]
# Calculate New Centroid (OLD WAY)
# for i in range(0, len(self.centroid)):
# new_centroid[i] = (self.centroid[i] * (self.m - 1) + curr_z[i]) / self.m
        # Calculate and Update New Variance (OLD WAY _ WITH CENTROID)
# for i in range(0, len(self.variance)):
# prev_variance = self.variance[i]
# self.variance[i] = (1.0 / self.m) * (
# (self.m - 1) * prev_variance + (x[i] - self.centroid[i]) * (x[i] - new_centroid[i]))
        # Calculate and Update New Variance (NEW WAY _ WITH FOCAL POINT)
# for i in range(0, len(self.variance)):
# # dist_x_f = self.zf[:self.xsize] - x
# dist_z_f = self.zf - curr_z
# self.variance[i] = self.variance[i]*float(self.m-1)/self.m + np.dot(dist_z_f, dist_z_f)/float(self.m-1)
        # Calculate and Update New Variance (NEW WAY _ WITH FOCAL POINT)
for i in range(len(self.variance)):
dist_x_f = self.zf[:self.xsize][i] - x[i]
self.variance[i] = self.variance[i] * float(self.m - 1) / self.m + (dist_x_f ** 2) / float(self.m - 1)
# Update centroid (OLD WAY)
# self.centroid = new_centroid
def add_point(self, curr_z):
"""
        Associates a new point with the data cloud, updating the number of points,
        the local density values, sigma, and the radius.
Keyword arguments:
curr_z -- datacloud point composed by x (data sample) and u (control signal)
"""
# Update number of points
self.m += 1
# Update Variance
self.__update_variance_and_centroid__(curr_z)
# Update radius
self.__update_radius__()
# Update local density values
prev_x = self.prev_z[:self.xsize]
prev_csi = self.csi
prev_b = self.betha
self.csi = prev_csi + prev_x
self.betha = prev_b + np.dot(prev_x, prev_x)
# Update Prev values (last added point):
self.prev_z = np.copy(curr_z)
def get_local_density(self, x):
"""
Recursively calculate the local density relative to the sample input x
Keyword arguments:
x -- an input of dimension XSIZE
"""
prev_x = self.prev_z[:self.xsize]
prev_csi = self.csi
prev_b = self.betha
csi_k = prev_csi + prev_x
a_k = np.dot(x, csi_k)
b_k = prev_b + np.dot(prev_x, prev_x)
ld = float(self.m) / (self.m * (np.dot(x, x) + 1) - 2. * a_k + b_k)
return ld
def update_consequent(self, prev_md, prev_ref, curr_y, prev_u, c, umin, umax):
"""
Updates consequent
Keyword arguments:
prev_md -- membership degree of the previous data sample related to this cloud.
prev_ref -- previous reference value
curr_y -- current plant output value
prev_u -- previous control signal
C -- Consequent constant calculated by: C = (UMAX - UMIN)/(REFMAX - REFMIN)
        umin, umax -- Control signal range, used to determine if the consequent should be penalized.
"""
# Calculate relative error:
e = (prev_ref - curr_y)
# Calculate consequent differential
dq = c * prev_md * e
        # Check if the control signal maximum or minimum has been reached,
        # to prevent penalization in these cases
if (prev_u <= umin) and (dq < 0):
dq = 0.0
if (prev_u >= umax) and (dq > 0):
dq = 0.0
# Get Consequent
q = self.get_consequent()
# Updates consequent
self.set_consequent(q + dq)
def set_consequent(self, new_consequent):
self.zf[-1] = new_consequent
def get_consequent(self):
"""
Extract consequent value from the focal point of the data cloud (zf).
"""
return self.zf[-1]
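# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Minimal illustration of how SparcController might be driven in a closed loop.
# The toy plant, the reference curve, the input vector layout ([error, error
# derivative]) and all numeric ranges below are assumptions for illustration only.
if __name__ == '__main__':
    n_steps = 100
    reference = [1.0] * n_steps                      # assumed constant set-point
    y = 0.0                                          # assumed initial plant output
    x = np.array([reference[0] - y, 0.0])            # assumed input: [error, d(error)]
    controller = SparcController(control_range=(-10.0, 10.0),
                                 ref_range=(0.0, 1.0),
                                 input_size=2,
                                 init_input=x,
                                 init_ref=reference[0],
                                 init_y=y)
    u = 0.0
    prev_error = reference[0] - y
    for k in range(1, n_steps):
        y = y + 0.1 * u                              # toy first-order plant (assumed)
        error = reference[k] - y
        x = np.array([error, error - prev_error])
        u = controller.update(x, y, reference[k], u)
        prev_error = error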
| mit | 6,382,856,539,554,038,000 | 38.284768 | 119 | 0.600079 | false | 3.778344 | false | false | false |
| rtts/qqq | user_profile/views.py | 1 | 3338 |
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required
from user_profile.models import Profile
from user_profile.forms import *
from messages.models import Message
from qqq.models import Contribution
from messages.views import reply as dm_reply
from messages.views import delete as dm_delete
from messages.views import compose as dm_compose
from messages.utils import format_quote
from datetime import datetime
import types
def get_profile(user):
"""Returns a user's profile, and creates one if it doesn't exist
yet. (Should've been implemented in some auth module, but just
in case...)"""
try:
p = Profile.objects.get(user=user)
except Profile.DoesNotExist:
p = Profile(description='', user=user)
p.save()
return p
def view_profile(request, username):
t = get_template('profile.html')
c = RequestContext(request)
user = get_object_or_404(User, username=username)
profile = get_profile(user)
c['karma'] = profile.karma
c['description'] = profile.description
c['feed'] = user.contributions.all().select_related('user', 'question', 'revision', 'post', 'tagaction')[:25]
c['username'] = username
return HttpResponse(t.render(c))
@login_required
def view_message(request, id):
t = get_template('view_pm.html')
c = RequestContext(request)
msg = get_object_or_404(Message, id=id)
msg.read_at = datetime.now()
msg.save()
c['msg'] = msg
return HttpResponse(t.render(c))
@login_required
def edit_profile(request):
t = get_template('edit_profile.html')
c = RequestContext(request)
profile = get_profile(request.user)
if request.method == 'POST':
form = ProfileForm(request.POST)
if form.is_valid():
form.save(profile)
return HttpResponseRedirect(reverse(view_profile, args=[request.user.username]))
else:
form = ProfileForm(initial={'description': profile.description})
c['form'] = form
c['username'] = request.user.username
return HttpResponse(t.render(c))
def sent(request):
c = RequestContext(request)
t = get_template('pm_sent.html')
return HttpResponse(t.render(c))
@login_required
def compose(request, username):
t = get_template('send-pm.html')
c = RequestContext(request)
next = reverse(sent)
recipient = get_object_or_404(User, username=username)
if 'parent' in request.GET:
parent = get_object_or_404(Message, id=request.GET['parent'])
else:
parent = False
if request.method == 'POST':
form = MessageForm(request.POST)
if form.is_valid():
form.save(sender=request.user, recipient=recipient, parent=parent)
return HttpResponseRedirect(next)
else:
if parent:
body = format_quote(parent.sender, parent.body)
else:
body = ''
form = MessageForm(initial = {'body': body})
c['form'] = form
c['username'] = username
return HttpResponse(t.render(c))
@login_required
def delete(request):
if 'message' in request.GET:
return dm_delete(request, request.GET['message'], success_url="/")
else:
raise Http404
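# --- Hypothetical URLconf sketch (editor's addition, not part of the original file) ---
# One plausible way to wire these views up in a urls.py; the URL patterns and
# names below are assumptions, not taken from the rtts/qqq project.
#
#   from django.conf.urls import url
#   from user_profile import views
#
#   urlpatterns = [
#       url(r'^user/(?P<username>[\w.@+-]+)/$', views.view_profile),
#       url(r'^user/(?P<username>[\w.@+-]+)/message/$', views.compose),
#       url(r'^message/(?P<id>\d+)/$', views.view_message),
#       url(r'^message/sent/$', views.sent),
#       url(r'^message/delete/$', views.delete),
#       url(r'^profile/edit/$', views.edit_profile),
#   ]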
| gpl-3.0 | 1,092,736,102,146,066,000 | 29.623853 | 111 | 0.713901 | false | 3.684327 | false | false | false |
| shady831213/myBlog | myBlog/articles/permission.py | 1 | 1112 |
from rest_framework import permissions
# admin-or-read-only permission
class ArticlePermission(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS or
request.user and request.user.is_staff
)
def has_object_permission(self, request, view, obj):
return (
request.user.is_staff or obj.status == 'published' and request.method in permissions.SAFE_METHODS
)
class ArticleLonginPermission(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS or
request.user and request.user.is_authenticated
)
def has_object_permission(self, request, view, obj):
return (
request.user.is_staff or
request.user.is_authenticated and request.method == 'POST' or
obj.author == request.user and request.method in ('PUT', 'GET','PATCH', 'DELETE') or
obj.status == 'published' and request.method in permissions.SAFE_METHODS
)
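# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Shows how a permission class like the ones above is typically attached to a
# DRF view. `ArticleViewSet`, `Article` and `ArticleSerializer` are assumed names.
#
#   from rest_framework import viewsets
#
#   class ArticleViewSet(viewsets.ModelViewSet):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       permission_classes = (ArticlePermission,)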
| mit | 2,163,750,632,873,102,600 | 37.37931 | 109 | 0.652878 | false | 4.412698 | false | false | false |
| Exesium/python_training | fixture/session.py | 1 | 1504 |
# -*- coding: utf-8 -*-
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("pass").click()
wd.find_element_by_id("LoginForm").click()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
return self.logged_user() == username
def logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
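# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# SessionHelper expects an application fixture exposing a Selenium WebDriver as
# `app.wd` and an `open_home_page()` method; the fixture and credentials below
# are assumptions for illustration only.
#
#   app = ApplicationFixture()          # hypothetical fixture providing .wd
#   session = SessionHelper(app)
#   session.ensure_login(username="admin", password="secret")
#   ...                                 # exercise the application under test
#   session.ensure_logout()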
| gpl-3.0 | 6,056,223,281,163,196,000 | 30.333333 | 76 | 0.569149 | false | 3.364653 | false | false | false |
| padajuan/ansible-module-etcd | library/etcd.py | 1 | 9055 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: etcd
short_description: Set and delete values from etcd
description:
- Sets or deletes values in etcd.
- Parent directories of the key will be created if they do not already exist.
version_added: "2.4"
author: Juan Manuel Parrilla (@padajuan)
requirements:
- python-etcd >= 0.3.2
options:
state:
description:
- This will be the state of the key in etcd
- after this module completes its operations.
required: true
choices: [present, absent]
default: null
protocol:
description:
- The scheme to connect to ETCD
required: false
default: http
choices: [http, https]
host:
description:
- The etcd host to use
required: false
default: 127.0.0.1
port:
description:
- The port to use on the above etcd host
required: false
default: 4001
api_version:
description:
- Api version of ETCD endpoint
required: false
default: '/v2'
key:
description:
- The key in etcd at which to set the value
required: true
default: null
value:
description:
- The value to be set in etcd
required: true
default: null
override:
description:
- Force the overwriting of a key-value on etcd
required: false
default: false
allow_redirect:
description:
- Etcd attempts to redirect all write requests to the etcd master
- for safety reasons. If allow_redirect is set to false, such
- redirection will not be allowed. In this case, the value for `host`
- must be the etcd leader or this module will err.
required: false
default: true
read_timeout:
description:
      - Time limit for a read request against ETCD
required: false
default: 60
cert:
description:
- Certificate to connect to an ETCD server with SSL
required: false
default: None
  ca_cert:
description:
- CA Certificate to connect to an ETCD server with SSL
required: false
default: None
username:
description:
- Username to connect to ETCD with RBAC activated
required: false
default: None (by default etcd will use guest)
password:
description:
- Password to authenticate to ETCD with RBAC activated
required: false
default: None
notes:
  - The module will not overwrite an existing value in ETCD unless you explicitly set override.
- Based on a module from Rafe Colton
- Adapted from https://github.com/modcloth-labs/ansible-module-etcd
  - The python-etcd bindings are still not compatible with the v1 and v3
    ETCD API endpoints, so this module will not work with them.
  - I will try to contribute to python-etcd to make it compatible
    with those versions.
"""
EXAMPLES = """
---
# set a value in etcd
- etcd:
state=present
host=my-etcd-host.example.com
port=4001
key=/asdf/foo/bar/baz/gorp
value=my-foo-bar-baz-gor-server.prod.example.com
# delete a value from etcd
- etcd:
state=absent
host=my-etcd-host.example.com
port=4001
key=/asdf/foo/bar/baz/gorp
# override an existing ETCD value
- etcd:
state: present
host: 127.0.0.1
port: 2379
key: "test"
value: "test_value"
override: True
# override a value through SSL connection
- etcd:
state: present
protocol: https
host: etcd.globalcorp.com
port: 2379
key: "test"
value: "test_value"
cert: /path/to/cert
ca_cert: /path/to/CA
override: True
# delete an ETCD value with a user and password
- etcd:
state: absent
host: 127.0.0.1
port: 2379
username: 'user'
password: 'P4ssW0rd'
"""
RETURN = '''
---
key:
    description: The key queried
returned: success
type: string
value:
description: The result of the write on ETCD
    returned: success
type: dictionary
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import ConnectionError
try:
import etcd
etcd_found = True
except ImportError:
etcd_found = False
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent']),
protocol=dict(required=False, default='http', choices=['http', 'https']),
host=dict(required=False, default='127.0.0.1'),
port=dict(required=False, default=4001, type='int'),
api_version=dict(required=False, default='/v2'),
key=dict(required=True),
value=dict(required=False, default=None),
override=dict(required=False, default=False),
allow_redirect=dict(required=False, default=True),
read_timeout=dict(required=False, default=60, type='int'),
cert=dict(required=False, default=None),
ca_cert=dict(required=False, default=None),
username=dict(required=False, default=None),
password=dict(required=False, default=None, no_log=True)
),
supports_check_mode=True
)
if not etcd_found:
module.fail_json(msg="the python etcd module is required")
# For now python-etcd is not compatible with ETCD v1 and v3 api version
# Contributing on https://github.com/jplana/python-etcd.
# The entry point at this module is prepared for other versions.
if module.params['api_version'] != '/v2':
module.fail_json(msg="This module only support v2 of ETCD, for now")
# State
state = module.params['state']
# Target info
target_scheme = module.params['protocol']
target_host = module.params['host']
target_port = int(module.params['port'])
target_version = module.params['api_version']
# K-V
key = module.params['key']
value = module.params['value']
# Config
override = module.params['override']
if state == 'present' and not value:
module.fail_json(msg='Value is required with state="present".')
kwargs = {
'protocol': target_scheme,
'host': target_host,
'port': target_port,
'version_prefix': target_version,
'allow_redirect': module.params['allow_redirect'],
'read_timeout': int(module.params['read_timeout']),
'cert': module.params['cert'],
'ca_cert': module.params['ca_cert'],
'username': module.params['username'],
'password': module.params['password']
}
client = etcd.Client(**kwargs)
change = False
prev_value = None
# Attempt to get key
try:
# Getting ETCD Value
prev_value = client.get(key).value
except etcd.EtcdKeyNotFound:
# There is not value on ETCD
prev_value = None
# Handle check mode
if module.check_mode:
if ((state == 'absent' and prev_value is not None) or
(state == 'present' and prev_value != value)):
change = True
module.exit_json(changed=change)
if state == 'present' and prev_value is None:
        # If 'Present' and there is no previous value on ETCD
try:
set_res = client.write(key, value)
change = True
except ConnectionError:
module.fail_json(msg="Cannot connect to target.")
elif state == 'present' and prev_value is not None:
        # If 'Present' and a previous value exists on ETCD
if prev_value == value:
# The value to set, is already present
change = False
elif override == 'True':
            # Trying to override an already existing key on ETCD with the flag set
set_res = client.write(key, value)
change = True
else:
            # Trying to override an already existing key on ETCD without the flag
module.fail_json(msg="The Key '%s' is already set with '%s', exiting..." % (key, prev_value))
elif state == 'absent':
if prev_value is not None:
try:
set_res = client.delete(key)
change = True
except ConnectionError:
module.fail_json(msg="Cannot connect to target.")
results = {
'changed': change,
'value': value,
'key': key
}
module.exit_json(**results)
if __name__ == "__main__":
main()
| mit | -4,966,589,647,599,316,000 | 27.564669 | 105 | 0.631695 | false | 3.841748 | false | false | false |
| meeb/txcloudfiles | examples/account_set_temp_key.py | 1 | 2390 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2012 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Trivial example of how to set an account temporary URL key. See:
http://docs.rackspace.com/files/api/v1/cf-devguide/content/Set_Account_Metadata-d1a4460.html
'''
import os, sys
# make sure our local copy of txcloudfiles is in sys.path
PATH_TO_TXCF = '../txcloudfiles/'
try:
import txcloudfiles
except ImportError:
txcfpath = os.path.dirname(os.path.realpath(PATH_TO_TXCF))
if txcfpath not in sys.path:
sys.path.insert(0, txcfpath)
from hashlib import sha256
from twisted.internet import reactor
from txcloudfiles import get_auth, UK_ENDPOINT, US_ENDPOINT
def _got_session(session):
print '> got session: %s' % session
random_key = sha256(os.urandom(256)).hexdigest()
def _ok((response, v)):
'''
'response' is a transport.Response() instance.
'v' is boolean True.
'''
print '> got response: %s' % response
print '> set temp url key to:'
print random_key
reactor.stop()
print '> sending request'
# 'key' here is any random string to set as the temporary URL key
session.set_temp_url_key(key=random_key).addCallback(_ok).addErrback(_error)
def _error(e):
'''
'e' here will be a twisted.python.failure.Failure() instance wrapping
a ResponseError() object. ResponseError() instances contain information
about the request to help you find out why it errored through its
ResponseError().request attribute.
'''
print 'error!'
print e.printTraceback()
reactor.stop()
auth = get_auth(UK_ENDPOINT, os.environ.get('TXCFUSR', ''), os.environ.get('TXCFAPI', ''))
auth.get_session().addCallback(_got_session).addErrback(_error)
reactor.run()
'''
EOF
'''
| apache-2.0 | 9,021,300,464,980,471,000 | 28.506173 | 96 | 0.673222 | false | 3.811802 | false | false | false |
| googleads/google-ads-python | google/ads/googleads/v8/errors/types/partial_failure_error.py | 1 | 1168 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"PartialFailureErrorEnum",},
)
class PartialFailureErrorEnum(proto.Message):
r"""Container for enum describing possible partial failure
errors.
"""
class PartialFailureError(proto.Enum):
r"""Enum describing possible partial failure errors."""
UNSPECIFIED = 0
UNKNOWN = 1
PARTIAL_FAILURE_MODE_REQUIRED = 2
__all__ = tuple(sorted(__protobuf__.manifest))
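# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# PartialFailureError is a proto-plus enum (an IntEnum subclass), so values can
# be resolved by name or number; the lookups below are illustrative only.
if __name__ == "__main__":
    err = PartialFailureErrorEnum.PartialFailureError
    assert err.PARTIAL_FAILURE_MODE_REQUIRED == 2
    assert err(1).name == "UNKNOWN"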
| apache-2.0 | -6,045,782,478,356,995,000 | 29.736842 | 74 | 0.708048 | false | 4.069686 | false | false | false |
| sequana/sequana | sequana/modules_report/bwa_bam_to_fastq.py | 1 | 3230 |
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write coverage report"""
import os
import glob
import io
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils import config
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
import colorlog
logger = colorlog.getLogger(__name__)
from sequana.utils.datatables_js import DataTable
class BWABAMtoFastQModule(SequanaBaseModule):
""" Write HTML report of BWA mapping (phix)"""
def __init__(self, input_directory, output_filename=None):
"""
:param input_directory: the directory of the bwa_bam_to_fastq output
:param output_filename: if not provided, the HTML is not created.
"""
super().__init__()
self.directory = input_directory + os.sep
self.create_report_content()
if output_filename:
self.create_html(output_filename)
def create_report_content(self):
""" Generate the sections list to fill the HTML report.
"""
self.sections = list()
self.add_stats()
def _get_html_stats(self):
from sequana.tools import StatsBAM2Mapped
from easydev import precision
data = StatsBAM2Mapped(self.directory + "bwa_mem_stats.json").data
html = "Reads with Phix: %s %%<br>" % precision(data['contamination'], 3)
# add HTML table
if "R2_mapped" in data.keys():
df = pd.DataFrame({
'R1': [data['R1_mapped'], data['R1_unmapped']],
'R2': [data['R2_mapped'], data['R2_unmapped']]})
else:
df = pd.DataFrame({
'R1': [data['R1_mapped'], data['R1_unmapped']]})
df.index = ['mapped', 'unmapped']
datatable = DataTable(df, "bwa_bam")
datatable.datatable.datatable_options = {
'scrollX': '300px',
'pageLength': 30,
'scrollCollapse': 'true',
'dom': 'irtpB',
"paging": "false",
'buttons': ['copy', 'csv']}
js = datatable.create_javascript_function()
html_tab = datatable.create_datatable(float_format='%.3g')
#html += "{} {}".format(html_tab, js)
html += "Unpaired: %s <br>" % data['unpaired']
html += "duplicated: %s <br>" % data['duplicated']
return html
def _get_html_mapped_stats(self):
html = ""
return html
def add_stats(self):
html1 = self._get_html_stats()
html2 = self._get_html_mapped_stats()
self.sections.append({
"name": "Stats inputs",
"anchor": "stats",
"content": html1+html2
})
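# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# The module reads <input_directory>/bwa_mem_stats.json produced by the
# bwa_bam_to_fastq step; the directory and output names below are assumptions.
#
#   module = BWABAMtoFastQModule("analysis/bwa_bam_to_fastq",
#                                output_filename="bwa_bam_to_fastq.html")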
| bsd-3-clause | 6,454,543,908,194,447,000 | 30.666667 | 81 | 0.586378 | false | 3.573009 | false | false | false |
| ramondiez/machine-learning | ex2/plotDecisionBoundary.py | 1 | 1679 |
'''
Created on 20 feb. 2017
@author: fara
'''
import numpy as np
from matplotlib import pyplot as plt
from mapFeature import mapFeature
from show import show
def plotDecisionBoundary(ax,theta, X, y):
'''
%PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
%the decision boundary defined by theta
% PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
% positive examples and o for the negative examples. X is assumed to be
    % either
% 1) Mx3 matrix, where the first column is an all-ones column for the
% intercept.
% 2) MxN, N>3 matrix, where the first column is all-ones
'''
if X.shape[1] <= 3:
# Only need 2 points to define a line, so choose two endpoints
plot_x = np.array([min(X[:, 2]), max(X[:, 2])])
# Calculate the decision boundary line
plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])
# Plot, and adjust axes for better viewing
ax.plot(plot_x, plot_y)
# Legend, specific for the exercise
plt.legend(['Admitted', 'Not admitted'],loc='upper right', fontsize='x-small', numpoints=1)
plt.axis([30, 100, 30, 100])
else:
# Here is the grid range
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z= np.array([mapFeature(u[i], v[j]).dot(theta) for i in range(len(u)) for j in range(len(v))])
#Reshape to get a 2D array
z=np.reshape(z, (50, 50))
#Draw the plot
plt.contour(u,v,z, levels=[0.0])
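# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Exercises the linear-boundary branch (X.shape[1] <= 3) on synthetic data;
# the theta coefficients and the 30-100 feature range are assumptions only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    m = 100
    X = np.column_stack([np.ones(m),
                         rng.uniform(30, 100, m),
                         rng.uniform(30, 100, m)])
    y = (X[:, 1] + X[:, 2] > 130).astype(int)
    theta = np.array([-130.0, 1.0, 1.0])         # assumed boundary x1 + x2 = 130
    fig, ax = plt.subplots()
    ax.plot(X[y == 1, 1], X[y == 1, 2], 'k+')
    ax.plot(X[y == 0, 1], X[y == 0, 2], 'yo')
    plotDecisionBoundary(ax, theta, X, y)
    plt.show()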
| gpl-3.0 | -6,819,960,103,957,050,000 | 29 | 102 | 0.564622 | false | 3.542194 | false | false | false |
| n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/NFR4XBoot/ubi_reader/ubi/block/sort.py | 5 | 2187 |
def list_by_list(blist, slist):
slist_blocks = []
for block in blist:
if block in slist:
slist_blocks.append(block)
return slist_blocks
def by_image_seq(blocks, image_seq):
seq_blocks = []
for block in blocks:
if blocks[block].ec_hdr.image_seq == image_seq:
seq_blocks.append(block)
return seq_blocks
def by_range(blocks, block_range):
peb_range = range(block_range[0], block_range[1])
return [ i for i in blocks if i in peb_range ]
def by_leb(blocks):
slist_len = len(blocks)
slist = ['x'] * slist_len
for block in blocks:
if blocks[block].leb_num >= slist_len:
add_elements = blocks[block].leb_num - slist_len + 1
slist += ['x'] * add_elements
slist_len = len(slist)
slist[blocks[block].leb_num] = block
return slist
return sorted(blocks.iterkeys(), key=lambda x: blocks[x].leb_num)
def by_vol_id(blocks, slist = None):
vol_blocks = {}
for i in blocks:
if slist and i not in slist:
continue
elif not blocks[i].is_valid:
continue
if blocks[i].vid_hdr.vol_id not in vol_blocks:
vol_blocks[blocks[i].vid_hdr.vol_id] = []
vol_blocks[blocks[i].vid_hdr.vol_id].append(blocks[i].peb_num)
return vol_blocks
def clean_bad(blocks, slist = None):
clean_blocks = []
for i in range(0, len(blocks)):
if slist and i not in slist:
continue
if blocks[i].is_valid:
clean_blocks.append(i)
return clean_blocks
def by_type(blocks, slist = None):
layout = []
data = []
int_vol = []
unknown = []
for i in blocks:
if slist and i not in slist:
continue
if blocks[i].is_vtbl and blocks[i].is_valid:
layout.append(i)
elif blocks[i].is_internal_vol and blocks[i].is_valid:
int_vol.append(i)
elif blocks[i].is_valid:
data.append(i)
else:
unknown.append(i)
return (layout,
data,
int_vol,
unknown)
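# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# In ubi_reader these helpers receive a dict mapping PEB numbers to block
# description objects (with ec_hdr, vid_hdr, leb_num, is_valid, ... attributes).
# A typical call order, with `blocks` coming from the block extraction step:
#
#   layout, data, int_vol, unknown = by_type(blocks)
#   vol_blocks = by_vol_id(blocks, slist=clean_bad(blocks))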
| gpl-2.0 | 8,051,677,869,966,243,000 | 23.752941 | 70 | 0.54321 | false | 3.417188 | false | false | false |
| skosukhin/spack | var/spack/repos/builtin/packages/gmake/package.py | 1 | 2179 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gmake(AutotoolsPackage):
"""GNU Make is a tool which controls the generation of executables and
other non-source files of a program from the program's source files."""
homepage = "https://www.gnu.org/software/make/"
url = "https://ftp.gnu.org/gnu/make/make-4.2.1.tar.gz"
version('4.2.1', '7d0dcb6c474b258aab4d54098f2cf5a7')
version('4.0', 'b5e558f981326d9ca1bfdb841640721a')
variant('guile', default=False, description='Support GNU Guile for embedded scripting')
depends_on('guile', when='+guile')
build_directory = 'spack-build'
def configure_args(self):
args = []
if '+guile' in self.spec:
args.append('--with-guile')
else:
args.append('--without-guile')
return args
@run_after('install')
def symlink_gmake(self):
with working_dir(self.prefix.bin):
symlink('make', 'gmake')
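# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Typical Spack command lines for this package; the spec syntax is standard
# Spack usage, not something defined by this file.
#
#   $ spack install gmake@4.2.1          # plain build
#   $ spack install gmake@4.2.1 +guile   # enable the Guile variant declared above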
| lgpl-2.1 | 6,446,346,152,501,126,000 | 37.22807 | 91 | 0.65397 | false | 3.843034 | false | false | false |
| carlshan/ml_workflow | datascience_tools/modeling/workflow_model_setup.py | 1 | 2346 |
from workflow_diagnostics import get_diagnostics_dict
from workflow_util import upload_to_s3
from sklearn import preprocessing
import cPickle as pickle
import pandas as pd
import os
def run_model(training, testing, features, outcome, clf,
clf_name, normalize=True, verbose=True):
# NOTE: You should set the clf seed ahead of time
if verbose:
print 'Starting training of: {}'.format(clf_name)
print '----------'
print 'Num Features: {}'.format(len(features))
print 'Shape of Training: {}'.format(training.shape)
print 'Shape of Testing: {}'.format(testing.shape)
print 'Outcome: {}'.format(outcome)
X_train, y_train = training[features].values, training[outcome].values
X_test = testing[features].values
if normalize:
X_train = preprocessing.StandardScaler().fit(X_train).transform(X_train)
X_test = preprocessing.StandardScaler().fit(X_test).transform(X_test)
fitted_clf = clf.fit(X_train, y_train)
if verbose:
print 'Finished Training'
print '\n'
print 'Starting Testing:'
print '----------'
predicted_probabilities = fitted_clf.predict_proba(X_test)
if verbose:
print 'Finished Testing...\n'
return fitted_clf, predicted_probabilities
def run_and_output_model_to_s3(training, testing, features, outcome, clf, clf_name, s3_path,
verbose=True, **kwargs):
fitted_clf, predicted_probs = run_model(training, testing, features, outcome, clf,
clf_name, verbose)
#Pickling happens here
os.mkdir('../results/temp/')
filepath = os.path.join('../results/temp', clf_name + '.pkl')
pickle.dump(fitted_clf, open(filepath, 'wb'))
print 'Uploading to S3 at {}'.format(s3_path)
upload_to_s3('../results/temp', clf_name + '.pkl', s3_path = s3_path)
print 'Done uploading {} to s3 \n'.format(filepath)
os.remove(filepath)
os.rmdir('../results/temp/')
# Putting the diagnostics dict into a dataframe and saving to results folder
diagnostics_dict = get_diagnostics_dict(fitted_clf, testing, features, outcome, clf_name, **kwargs)
results_df = pd.read_csv('../results/results.csv')
results_df = results_df.append([diagnostics_dict])
results_df.to_csv(path_or_buf='../results/results.csv', index=False)
return diagnostics_dict
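# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Exercises run_model() on a small synthetic frame. The column names, the
# classifier choice and the train/test split below are assumptions only.
if __name__ == '__main__':
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    frame = pd.DataFrame({'f1': rng.randn(200), 'f2': rng.randn(200)})
    frame['label'] = (frame['f1'] + frame['f2'] > 0).astype(int)
    training, testing = frame.iloc[:150], frame.iloc[150:]

    fitted_clf, predicted_probs = run_model(
        training, testing, features=['f1', 'f2'], outcome='label',
        clf=LogisticRegression(), clf_name='logit_demo')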
| mit | -8,404,296,448,362,090,000 | 37.459016 | 103 | 0.665388 | false | 3.682889 | true | false | false |
| akretion/odoo | odoo/tools/image.py | 10 | 15192 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import codecs
import io
from PIL import Image
from PIL import ImageEnhance
from random import randrange
# Preload PIL with the minimal subset of image formats we need
from odoo.tools import pycompat
Image.preinit()
Image._initialized = 2
# Maps only the first 6 bits of the base64 data, accurate enough
# for our purpose and faster than decoding the full blob first
FILETYPE_BASE64_MAGICWORD = {
b'/': 'jpg',
b'R': 'gif',
b'i': 'png',
b'P': 'svg+xml',
}
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False, upper_limit=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
- create a thumbnail of the source image through using the thumbnail
function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for any of width or
height mean an automatically computed value based respectively
on height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
if not base64_source:
return False
    # Return unmodified content if no resize or if we detect the first 6 bits of '<'
# (0x3C) for SVG documents - This will bypass XML files as well, but it's
# harmless for these purposes
if size == (None, None) or base64_source[:1] == b'P':
return base64_source
image_stream = io.BytesIO(codecs.decode(base64_source, encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if upper_limit:
if asked_width:
if asked_width >= image.size[0]:
asked_width = image.size[0]
if asked_height:
if asked_height >= image.size[1]:
asked_height = image.size[1]
if image.size[0] >= image.size[1]:
asked_height = None
else:
asked_width = None
if asked_width is None and asked_height is None:
return base64_source
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
image = image_resize_and_sharpen(image, size, upper_limit=upper_limit)
if image.mode not in ["1", "L", "P", "RGB", "RGBA"] or (filetype == 'JPEG' and image.mode == 'RGBA'):
image = image.convert("RGB")
background_stream = io.BytesIO()
image.save(background_stream, filetype)
return codecs.encode(background_stream.getvalue(), encoding)
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0, upper_limit=False):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
"""
origin_mode = image.mode
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
if upper_limit:
image = Image.new('RGBA', (size[0], size[1]-3), (255, 255, 255, 0)) # FIXME temporary fix for trimming the ghost border.
else:
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) // 2, (size[1] - resized_image.size[1]) // 2))
if image.mode != origin_mode:
image = image.convert(origin_mode)
return image
def image_save_for_web(image, fp=None, format=None):
"""
Save image optimized for web usage.
:param image: PIL.Image.Image()
:param fp: File name or file object. If not specified, a bytestring is returned.
:param format: File format if could not be deduced from image.
"""
opt = dict(format=image.format or format)
if image.format == 'PNG':
opt.update(optimize=True)
if image.mode != 'P':
# Floyd Steinberg dithering by default
image = image.convert('RGBA').convert('P', palette=Image.WEB, colors=256)
elif image.format == 'JPEG':
opt.update(optimize=True, quality=80)
if fp:
image.save(fp, **opt)
else:
img = io.BytesIO()
image.save(img, **opt)
return img.getvalue()
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
    image size: 128x128.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
    size: 64x64.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Crop Image
# ----------------------------------------
def crop_image(data, type='top', ratio=False, size=None, image_format=None):
""" Used for cropping image and create thumbnail
:param data: base64 data of image.
:param type: Used for cropping position possible
Possible Values : 'top', 'center', 'bottom'
:param ratio: Cropping ratio
e.g for (4,3), (16,9), (16,10) etc
send ratio(1,1) to generate square image
:param size: Resize image to size
e.g (200, 200)
after crop resize to 200x200 thumbnail
:param image_format: return image format PNG,JPEG etc
"""
if not data:
return False
image_stream = Image.open(io.BytesIO(base64.b64decode(data)))
output_stream = io.BytesIO()
w, h = image_stream.size
new_h = h
new_w = w
if ratio:
w_ratio, h_ratio = ratio
new_h = (w * h_ratio) // w_ratio
new_w = w
if new_h > h:
new_h = h
new_w = (h * w_ratio) // h_ratio
image_format = image_format or image_stream.format or 'JPEG'
if type == "top":
cropped_image = image_stream.crop((0, 0, new_w, new_h))
cropped_image.save(output_stream, format=image_format)
elif type == "center":
cropped_image = image_stream.crop(((w - new_w) // 2, (h - new_h) // 2, (w + new_w) // 2, (h + new_h) // 2))
cropped_image.save(output_stream, format=image_format)
elif type == "bottom":
cropped_image = image_stream.crop((0, h - new_h, new_w, h))
cropped_image.save(output_stream, format=image_format)
else:
raise ValueError('ERROR: invalid value for crop_type')
if size:
thumbnail = Image.open(io.BytesIO(output_stream.getvalue()))
output_stream.truncate(0)
output_stream.seek(0)
thumbnail.thumbnail(size, Image.ANTIALIAS)
thumbnail.save(output_stream, image_format)
return base64.b64encode(output_stream.getvalue())
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
:param original: file object on the original image file
:param randomize: randomize the background color
:param color: background-color, if not randomize
"""
# create a new image, based on the original one
original = Image.open(io.BytesIO(original))
image = Image.new('RGB', original.size)
    # generate the background color and paste it as the background
if randomize:
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
image.paste(color, box=(0, 0) + original.size)
image.paste(original, mask=original)
# return the new image
buffer = io.BytesIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False, sizes={}):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
image; if False, all returned values will be False
:param return_{..}: if set, computes and return the related resizing
of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
"""
return_dict = dict()
size_big = sizes.get(big_name, (1024, 1024))
size_medium = sizes.get(medium_name, (128, 128))
size_small = sizes.get(small_name, (64, 64))
if isinstance(base64_source, pycompat.text_type):
base64_source = base64_source.encode('ascii')
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big, size=size_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium, size=size_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small, size=size_small)
return return_dict
def image_resize_images(vals, big_name='image', medium_name='image_medium', small_name='image_small', sizes={}):
""" Update ``vals`` with image fields resized as expected. """
if vals.get(big_name):
vals.update(image_get_resized_images(vals[big_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False, sizes=sizes))
elif vals.get(medium_name):
vals.update(image_get_resized_images(vals[medium_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=False, sizes=sizes))
elif vals.get(small_name):
vals.update(image_get_resized_images(vals[small_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=True, sizes=sizes))
elif big_name in vals or medium_name in vals or small_name in vals:
vals[big_name] = vals[medium_name] = vals[small_name] = False
def image_data_uri(base64_source):
"""This returns data URL scheme according RFC 2397
(https://tools.ietf.org/html/rfc2397) for all kind of supported images
(PNG, GIF, JPG and SVG), defaulting on PNG type if not mimetype detected.
"""
return 'data:image/%s;base64,%s' % (
FILETYPE_BASE64_MAGICWORD.get(base64_source[:1], 'png'),
base64_source.decode(),
)
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = base64.b64encode(open(sys.argv[1],'rb').read())
new = image_resize_image(img, (128,100))
open(sys.argv[2], 'wb').write(base64.b64decode(new))
| agpl-3.0 | 2,528,655,349,134,123,500 | 44.214286 | 132 | 0.634742 | false | 3.748335 | false | false | false |
| jjdmol/LOFAR | SAS/ResourceAssignment/ResourceAssignmentEstimator/resource_estimators/base_resource_estimator.py | 1 | 3118 |
# base_resource_estimator.py
#
# Copyright (C) 2016
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: base_resource_estimator.py 33534 2016-02-08 14:28:26Z schaap $
""" Base class for Resource Estimators
"""
import logging
from datetime import datetime, timedelta
from lofar.common.datetimeutils import totalSeconds
from lofar.parameterset import parameterset
logger = logging.getLogger(__name__)
class BaseResourceEstimator(object):
""" Base class for all other resource estmiater classes
"""
def __init__(self, name):
self.name = name
self.required_keys = ()
def _checkParsetForRequiredKeys(self, parset):
""" Check if all required keys needed are available """
logger.debug("required keys: %s" % ', '.join(self.required_keys))
logger.debug("parset keys: %s" % ', '.join(parset.keys()))
missing_keys = set(self.required_keys) - set(parset.keys())
if missing_keys:
logger.error("missing keys: %s" % ', '.join(missing_keys))
return False
return True
def _getDateTime(self, date_time):
return datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
def _getDuration(self, start, end):
startTime = self._getDateTime(start)
endTime = self._getDateTime(end)
if startTime >= endTime:
logger.warning("startTime is after endTime")
return 1 ##TODO To prevent divide by zero later
return totalSeconds(endTime - startTime)
# TODO: check if this makes duration = int(parset.get('duration', 0)) as a key redundant
def _calculate(self, parset, input_files={}):
raise NotImplementedError('_calculate() in the base class was called. Please implement _calculate() in your subclass')
def verify_and_estimate(self, parset, input_files={}):
""" Create estimates for a single process based on its parset and input files"""
if self._checkParsetForRequiredKeys(parset):
estimates = self._calculate(parameterset(parset), input_files)
else:
raise ValueError('The parset is incomplete')
result = {}
result[self.name] = {}
result[self.name]['storage'] = estimates['storage']
result[self.name]['bandwidth'] = estimates['bandwidth']
return result
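# Illustrative sketch (not part of the original module): a minimal concrete
# estimator. The parset keys, the parameterset accessor names and the flat
# data rate below are assumptions made purely for illustration.
class _ExampleObservationEstimator(BaseResourceEstimator):
    def __init__(self):
        super(_ExampleObservationEstimator, self).__init__(name='example_observation')
        self.required_keys = ('Observation.startTime', 'Observation.stopTime')
    def _calculate(self, parset, input_files={}):
        # Duration in seconds between the (assumed) start/stop keys.
        duration = self._getDuration(str(parset.getString('Observation.startTime')),
                                     str(parset.getString('Observation.stopTime')))
        bandwidth = 1.0e6  # assumed flat 1 MB/s data rate, for illustration only
        return {'storage': bandwidth * duration, 'bandwidth': bandwidth}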
|
gpl-3.0
| 7,776,506,931,752,656,000
| 40.026316
| 119
| 0.679602
| false
| 3.977041
| false
| false
| false
|
midokura/python-midonetclient
|
src/midonetclient/tenant.py
|
1
|
2329
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ryu Ishimoto <ryu@midokura.com>, Midokura
from midonetclient import vendor_media_type
from midonetclient.bridge import Bridge
from midonetclient.chain import Chain
from midonetclient.port_group import PortGroup
from midonetclient.resource_base import ResourceBase
from midonetclient.router import Router
class Tenant(ResourceBase):
media_type = vendor_media_type.APPLICATION_TENANT_JSON
def __init__(self, uri, dto, auth):
super(Tenant, self).__init__(uri, dto, auth)
def get_name(self):
return self.dto['name']
def get_id(self):
return self.dto['id']
def id(self, id):
self.dto['id'] = id
return self
def name(self, name):
self.dto['name'] = name
return self
def get_bridges(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_BRIDGE_COLLECTION_JSON}
return self.get_children(self.dto['bridges'], query, headers, Bridge)
def get_chains(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_CHAIN_COLLECTION_JSON}
return self.get_children(self.dto['chains'], query, headers, Chain)
def get_port_groups(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_PORTGROUP_COLLECTION_JSON}
return self.get_children(self.dto['portGroups'], query, headers,
PortGroup)
def get_routers(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_ROUTER_COLLECTION_JSON}
return self.get_children(self.dto['routers'], query, headers, Router)
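# Illustrative usage sketch (comment only; the client setup and tenant id are
# hypothetical and depend on how the MidoNet API client is instantiated):
#
#   tenant = api.get_tenant('some-tenant-id')
#   for bridge in tenant.get_bridges():
#       print(bridge.get_name())
#   for router in tenant.get_routers():
#       print(router.get_name())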
|
apache-2.0
| -1,329,265,309,934,497,500
| 32.753623
| 77
| 0.674109
| false
| 3.811784
| false
| false
| false
|
pudo-attic/ted-xml
|
parse.py
|
1
|
6683
|
import os
from lxml import etree
from pprint import pprint
from forms.parseutil import ted_documents, Extractor
from collections import defaultdict
from common import engine, documents_table, contracts_table, cpvs_table, references_table
def select_form(form, lang):
lang = lang.split()[0]
children = form.getchildren()
if len(children) == 1:
return children.pop()
orig = None
for child in children:
if child.get('LG') == 'EN':
return child
if child.get('LG') == lang:
orig = child
return orig
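# Illustrative note (comment only): for a FORM_SECTION whose children carry
# LG="DE" and LG="EN" and a lang string of 'DE CS EN', select_form() returns
# the EN child; without an EN child it falls back to the child matching the
# first language code ('DE'), and returns None if neither is present.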
def parse(filename, file_content):
#fh = open(filename, 'rb')
xmldata = file_content.replace('xmlns="', 'xmlns_="')
#fh.close()
#print xmldata.decode('utf-8').encode('ascii', 'replace')
root = etree.fromstring(xmldata)
form = root.find('.//FORM_SECTION')
form.getparent().remove(form)
ext = Extractor(root)
cpvs = [{'code': e.get('CODE'), 'text': e.text} for e in root.findall('.//NOTICE_DATA/ORIGINAL_CPV')]
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/ORIGINAL_CPV')
refs = [e.text for e in root.findall('.//NOTICE_DATA/REF_NOTICE/NO_DOC_OJS')]
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/REF_NOTICE/NO_DOC_OJS')
data = {
'technical_reception_id': ext.text('./TECHNICAL_SECTION/RECEPTION_ID'),
'technical_comments': ext.text('./TECHNICAL_SECTION/COMMENTS'),
'technical_deletion_date': ext.text('./TECHNICAL_SECTION/DELETION_DATE'),
'technical_form_lang': ext.text('./TECHNICAL_SECTION/FORM_LG_LIST'),
'reception_id': ext.text('./TECHNICAL_SECTION/RECEPTION_ID'),
'oj_collection': ext.text('.//REF_OJS/COLL_OJ'),
'oj_number': ext.text('.//REF_OJS/NO_OJ'),
'oj_date': ext.text('.//REF_OJS/DATE_PUB'),
'doc_no': ext.text('.//NOTICE_DATA/NO_DOC_OJS'),
'doc_url': ext.text('.//NOTICE_DATA//URI_DOC[@LG="EN"]') or ext.text('.//NOTICE_DATA//URI_DOC'),
'info_url': ext.text('.//NOTICE_DATA/IA_URL_GENERAL'),
'etendering_url': ext.text('.//NOTICE_DATA/IA_URL_ETENDERING'),
'orig_language': ext.text('.//NOTICE_DATA/LG_ORIG'),
'orig_nuts': ext.text('.//NOTICE_DATA/ORIGINAL_NUTS'),
'orig_nuts_code': ext.attr('.//NOTICE_DATA/ORIGINAL_NUTS', 'CODE'),
'iso_country': ext.attr('.//NOTICE_DATA/ISO_COUNTRY', 'VALUE'),
'original_cpv': cpvs,
'references': refs,
'dispatch_date': ext.text('.//CODIF_DATA/DS_DATE_DISPATCH'),
'request_document_date': ext.text('.//CODIF_DATA/DD_DATE_REQUEST_DOCUMENT'),
'submission_date': ext.text('.//CODIF_DATA/DT_DATE_FOR_SUBMISSION'),
'heading': ext.text('.//CODIF_DATA/HEADING'),
'directive': ext.attr('.//CODIF_DATA/DIRECTIVE', 'VALUE'),
'authority_type_code': ext.attr('.//CODIF_DATA/AA_AUTHORITY_TYPE', 'CODE'),
'authority_type': ext.text('.//CODIF_DATA/AA_AUTHORITY_TYPE'),
'document_type_code': ext.attr('.//CODIF_DATA/TD_DOCUMENT_TYPE', 'CODE'),
'document_type': ext.text('.//CODIF_DATA/TD_DOCUMENT_TYPE'),
'contract_nature_code': ext.attr('.//CODIF_DATA/NC_CONTRACT_NATURE', 'CODE'),
'contract_nature': ext.text('.//CODIF_DATA/NC_CONTRACT_NATURE'),
'procedure_code': ext.attr('.//CODIF_DATA/PR_PROC', 'CODE'),
'procedure': ext.text('.//CODIF_DATA/PR_PROC'),
'regulation_code': ext.attr('.//CODIF_DATA/RP_REGULATION', 'CODE'),
'regulation': ext.text('.//CODIF_DATA/RP_REGULATION'),
'bid_type_code': ext.attr('.//CODIF_DATA/TY_TYPE_BID', 'CODE'),
'bid_type': ext.text('.//CODIF_DATA/TY_TYPE_BID'),
'award_criteria_code': ext.attr('.//CODIF_DATA/AC_AWARD_CRIT', 'CODE'),
'award_criteria': ext.text('.//CODIF_DATA/AC_AWARD_CRIT'),
'main_activities_code': ext.attr('.//CODIF_DATA/MA_MAIN_ACTIVITIES', 'CODE'),
'main_activities': ext.text('.//CODIF_DATA/MA_MAIN_ACTIVITIES'),
'title_text': ext.text('.//ML_TITLES/ML_TI_DOC[@LG="EN"]/TI_TEXT'),
'title_town': ext.text('.//ML_TITLES/ML_TI_DOC[@LG="EN"]/TI_TOWN'),
'title_country': ext.text('.//ML_TITLES/ML_TI_DOC[@LG="EN"]/TI_CY'),
'authority_name': ext.text('./TRANSLATION_SECTION/ML_AA_NAMES/AA_NAME')
}
ext.ignore('./LINKS_SECTION/FORMS_LABELS_LINK')
ext.ignore('./LINKS_SECTION/OFFICIAL_FORMS_LINK')
ext.ignore('./LINKS_SECTION/ORIGINAL_NUTS_LINK')
ext.ignore('./LINKS_SECTION/ORIGINAL_CPV_LINK')
ext.ignore('./LINKS_SECTION/XML_SCHEMA_DEFINITION_LINK')
# TODO: Figure out if we need any of this, even with the forms.
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/VALUES_LIST/VALUES/SINGLE_VALUE/VALUE')
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/VALUES_LIST')
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/VALUES_LIST/VALUES/RANGE_VALUE/VALUE')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/TOWN')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/POSTAL_CODE')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/PHONE')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/ORGANISATION/OFFICIALNAME')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/FAX')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/COUNTRY')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/CONTACT_POINT')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/ATTENTION')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/ADDRESS')
ext.audit()
form_ = select_form(form, data['orig_language'])
contracts = []
if form_.tag.startswith('CONTRACT_AWARD_'):
from forms.contract_award import parse_form
contracts = parse_form(form_)
# save to DB
doc_no = data['doc_no']
engine.begin()
cpvs_table.delete(doc_no=doc_no)
references_table.delete(doc_no=doc_no)
contracts_table.delete(doc_no=doc_no)
documents_table.delete(doc_no=doc_no)
for cpv in data.pop('original_cpv'):
cpv['doc_no'] = doc_no
cpvs_table.insert(cpv)
for ref in data.pop('references'):
obj = {'doc_no': doc_no, 'ref': ref}
references_table.insert(obj)
for contract in contracts:
contract['doc_no'] = doc_no
contracts_table.insert(contract)
documents_table.insert(data)
engine.commit()
#pprint(data)
if __name__ == '__main__':
import sys
for file_name, file_content in ted_documents():
parse(file_name, file_content)
#break
#parse_all(sys.argv[1])
|
mit
| 4,463,501,169,465,063,000
| 46.397163
| 105
| 0.638635
| false
| 3.128745
| false
| false
| false
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
1
|
13772
|
"""
This is awesome. And needs more documentation.
To bring some light in the big number of classes in this file:
First there are:
* ``SuperForm``
* ``SuperModelForm``
They are the forms that you probably want to use in your own code. They are
direct subclasses of ``django.forms.Form`` and ``django.forms.ModelForm``
and have the formset functionality of this module baked in. They are ready
to use. Subclass them and be happy.
Then there are:
* ``SuperFormMixin``
* ``SuperModelFormMixin``
These are the mixins you can use if you don't want to subclass from
``django.forms.Form`` for whatever reason. They don't have a metaclass
attached, so you need to combine them with ``SuperFormMetaclass`` or
``SuperModelFormMetaclass``, which handle the search for
``FormSetField``s.
Here is an example on how you can use this module::
from django import forms
from django_superform import SuperModelForm, FormSetField
from .forms import CommentFormSet
class PostForm(SuperModelForm):
title = forms.CharField()
text = forms.CharField()
comments = FormSetField(CommentFormSet)
# Now you can use the form in the view:
def post_form(request):
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
obj = form.save()
return HttpResponseRedirect(obj.get_absolute_url())
else:
form = PostForm()
return render_to_response('post_form.html', {
'form',
}, context_instance=RequestContext(request))
And yes, thanks for asking, the ``form.is_valid()`` and ``form.save()`` calls
transparently propagate to the defined comments formset and call its
``is_valid()`` and ``save()`` methods. So you don't have to do anything
special in your view!
Now to how you can access the instantiated formsets::
>>> form = PostForm()
>>> form.composite_fields['comments']
<CommentFormSet: ...>
Or in the template::
{{ form.as_p }}
{{ form.composite_fields.comments.management_form }}
{% for fieldset_form in form.composite_fields.comments %}
{{ fieldset_form.as_p }}
{% endfor %}
You're welcome.
"""
from functools import reduce
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass, ErrorDict, ErrorList
from django.forms.models import ModelFormMetaclass
from django.utils import six
import copy
from .fields import CompositeField
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
class DeclerativeCompositeFieldsMetaclass(type):
"""
Metaclass that converts FormField and FormSetField attributes to a
dictionary called `composite_fields`. It will also include all composite
fields from parent classes.
"""
def __new__(mcs, name, bases, attrs):
# Collect composite fields from current class.
current_fields = []
for key, value in list(attrs.items()):
if isinstance(value, CompositeField):
current_fields.append((key, value))
attrs.pop(key)
current_fields.sort(key=lambda x: x[1].creation_counter)
attrs['declared_composite_fields'] = OrderedDict(current_fields)
new_class = super(DeclerativeCompositeFieldsMetaclass, mcs).__new__(
mcs, name, bases, attrs)
# Walk through the MRO.
declared_fields = OrderedDict()
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, 'declared_composite_fields'):
declared_fields.update(base.declared_composite_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_composite_fields = declared_fields
new_class.declared_composite_fields = declared_fields
return new_class
class SuperFormMetaclass(
DeclerativeCompositeFieldsMetaclass,
DeclarativeFieldsMetaclass):
"""
Metaclass for :class:`~django_superform.forms.SuperForm`.
"""
class SuperModelFormMetaclass(
DeclerativeCompositeFieldsMetaclass,
ModelFormMetaclass):
"""
Metaclass for :class:`~django_superform.forms.SuperModelForm`.
"""
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form while
also accepting composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work completely transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
initialize it appropriately.
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
Incorporate the composite fields' media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
from django_superform import SuperModelFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperModelFormMetaclass,
SuperModelFormMixin,
MyCustomModelForm)):
pass
"""
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similar to what
Django's model forms are offering.
If ``commit=False``, django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well, so again all nested
forms are taken care of transparently.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
def _extend_save_m2m(self, name, composites):
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_form(self, commit=True):
"""
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
"""
return super(SuperModelFormMixin, self).save(commit=commit)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
def save_formsets(self, commit=True):
"""
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
"""
saved_composites = []
for name, composite in self.formsets.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
class SuperModelForm(six.with_metaclass(SuperModelFormMetaclass,
SuperModelFormMixin, forms.ModelForm)):
"""
The ``SuperModelForm`` works like a Django ``ModelForm`` but has the
capabilities of nesting like :class:`~django_superform.forms.SuperForm`.
Saving a ``SuperModelForm`` will also save all nested model forms as well.
"""
class SuperForm(six.with_metaclass(SuperFormMetaclass,
SuperFormMixin, forms.Form)):
"""
The base class for all super forms. The goal of a superform is to behave
just like a normal django form while also accepting composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work completely transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
|
bsd-3-clause
| 3,049,697,306,495,678,000
| 33.865823
| 79
| 0.636364
| false
| 4.285003
| false
| false
| false
|
openjck/kuma
|
kuma/wiki/models.py
|
1
|
71106
|
import hashlib
import json
import sys
import traceback
from datetime import datetime, timedelta
from functools import wraps
import newrelic.agent
import waffle
from constance import config
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import signals
from django.utils.decorators import available_attrs
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from pyquery import PyQuery
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase
from taggit.utils import edit_string_for_tags, parse_tags
from tidings.models import NotificationsMixin
from kuma.attachments.models import Attachment
from kuma.core.cache import memcache
from kuma.core.exceptions import ProgrammingError
from kuma.core.i18n import get_language_mapping
from kuma.core.urlresolvers import reverse
from kuma.search.decorators import register_live_index
from kuma.spam.models import AkismetSubmission, SpamAttempt
from . import kumascript
from .constants import (DEKI_FILE_URL, DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL,
KUMA_FILE_URL, REDIRECT_CONTENT, REDIRECT_HTML,
TEMPLATE_TITLE_PREFIX)
from .content import parse as parse_content
from .content import (H2TOCFilter, H3TOCFilter, SectionTOCFilter,
extract_code_sample, extract_css_classnames,
extract_html_attributes, extract_kumascript_macro_names,
get_content_sections, get_seo_description)
from .exceptions import (DocumentRenderedContentNotAvailable,
DocumentRenderingInProgress, PageMoveError,
SlugCollision, UniqueCollision)
from .jobs import DocumentContributorsJob, DocumentZoneStackJob
from .managers import (DeletedDocumentManager, DocumentAdminManager,
DocumentManager, RevisionIPManager,
TaggedDocumentManager, TransformManager)
from .search import WikiDocumentType
from .signals import render_done
from .templatetags.jinja_helpers import absolutify
from .utils import tidy_content
def cache_with_field(field_name):
"""Decorator for generated content methods.
If the backing model field is null, or kwarg force_fresh is True, call the
decorated method to generate and return the content.
Otherwise, just return the value in the backing model field.
"""
def decorator(fn):
@wraps(fn, assigned=available_attrs(fn))
def wrapper(self, *args, **kwargs):
force_fresh = kwargs.pop('force_fresh', False)
# Try getting the value using the DB field.
field_val = getattr(self, field_name)
if field_val is not None and not force_fresh:
return field_val
# DB field is blank, or we're forced to generate it fresh.
field_val = fn(self, force_fresh=force_fresh)
setattr(self, field_name, field_val)
return field_val
return wrapper
return decorator
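# Illustrative sketch (comment only): the decorator wraps a generator method
# and backs it with a model field of the same name, mirroring the usage on
# Document further below. ``compute_expensive_summary`` is hypothetical.
#
#   @cache_with_field('summary_text')
#   def get_summary_text(self, *args, **kwargs):
#       return compute_expensive_summary(self)
#
# Plain calls return the stored column when it is non-null, while
# get_summary_text(force_fresh=True) recomputes the value and caches it.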
def _inherited(parent_attr, direct_attr):
"""Return a descriptor delegating to an attr of the original document.
If `self` is a translation, the descriptor delegates to the attribute
`parent_attr` from the original document. Otherwise, it delegates to the
attribute `direct_attr` from `self`.
Use this only on a reference to another object, like a ManyToMany or a
ForeignKey. Using it on a normal field won't work well, as it'll preclude
the use of that field in QuerySet field lookups. Also, ModelForms that are
passed instance=this_obj won't see the inherited value.
"""
getter = lambda self: (getattr(self.parent, parent_attr)
if self.parent and self.parent.id != self.id
else getattr(self, direct_attr))
setter = lambda self, val: (setattr(self.parent, parent_attr, val)
if self.parent and self.parent.id != self.id
else setattr(self, direct_attr, val))
return property(getter, setter)
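# Illustrative sketch (comment only, hypothetical fields): a model whose
# translations should reuse a relation from their original document could do:
#
#   class ExampleDoc(models.Model):
#       parent = models.ForeignKey('self', null=True, blank=True)
#       category_local = models.ForeignKey('Category')
#       category = _inherited('category', 'category_local')
#
# Reading example_doc.category on a translation delegates to the parent's
# ``category``; on an original document it falls back to ``category_local``.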
def valid_slug_parent(slug, locale):
slug_bits = slug.split('/')
slug_bits.pop()
parent = None
if slug_bits:
parent_slug = '/'.join(slug_bits)
try:
parent = Document.objects.get(locale=locale, slug=parent_slug)
except Document.DoesNotExist:
raise Exception(
ugettext('Parent %s does not exist.' % (
'%s/%s' % (locale, parent_slug))))
return parent
class DocumentTag(TagBase):
"""A tag indexing a document"""
class Meta:
verbose_name = _('Document Tag')
verbose_name_plural = _('Document Tags')
def tags_for(cls, model, instance=None, **extra_filters):
"""
Copied from taggit to work around the issue of not being able to use
the TaggedItemBase class, which already defines the tag field.
"""
kwargs = extra_filters or {}
if instance is not None:
kwargs.update({
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**kwargs)
kwargs.update({
'%s__content_object__isnull' % cls.tag_relname(): False
})
return cls.tag_model().objects.filter(**kwargs).distinct()
class TaggedDocument(ItemBase):
"""Through model, for tags on Documents"""
content_object = models.ForeignKey('Document')
tag = models.ForeignKey(DocumentTag, related_name="%(app_label)s_%(class)s_items")
objects = TaggedDocumentManager()
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class DocumentAttachment(models.Model):
"""
Intermediary between Documents and Attachments. Allows storing the
user who attached a file to a document, and a (unique for that
document) name for referring to the file from the document.
"""
file = models.ForeignKey(Attachment)
# This has to be a string ref to avoid circular import.
document = models.ForeignKey('wiki.Document')
attached_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
name = models.TextField()
class Meta:
db_table = 'attachments_documentattachment'
@register_live_index
class Document(NotificationsMixin, models.Model):
"""A localized knowledgebase document, not revision-specific."""
TOC_FILTERS = {
1: SectionTOCFilter,
2: H2TOCFilter,
3: H3TOCFilter,
4: SectionTOCFilter
}
title = models.CharField(max_length=255, db_index=True)
slug = models.CharField(max_length=255, db_index=True)
# NOTE: Documents are indexed by tags, but tags are edited in Revisions.
# Also, using a custom through table to isolate Document tags from those
# used in other models and apps. (Works better than namespaces, for
# completion and such.)
tags = TaggableManager(through=TaggedDocument)
# Is this document a template or not?
is_template = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this a redirect or not?
is_redirect = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this document localizable or not?
is_localizable = models.BooleanField(default=True, db_index=True)
locale = models.CharField(
max_length=7,
choices=settings.LANGUAGES,
default=settings.WIKI_DEFAULT_LANGUAGE,
db_index=True,
)
# Latest approved revision. L10n dashboard depends on this being so (rather
# than being able to set it to earlier approved revisions). (Remove "+" to
# enable reverse link.)
current_revision = models.ForeignKey('Revision', null=True,
related_name='current_for+')
# The Document I was translated from. NULL if this doc is in the default
# locale or it is nonlocalizable. TODO: validate against
# settings.WIKI_DEFAULT_LANGUAGE.
parent = models.ForeignKey('self', related_name='translations',
null=True, blank=True)
parent_topic = models.ForeignKey('self', related_name='children',
null=True, blank=True)
files = models.ManyToManyField(Attachment,
through=DocumentAttachment)
# JSON representation of Document for API results, built on save
json = models.TextField(editable=False, blank=True, null=True)
# Raw HTML of approved revision's wiki markup
html = models.TextField(editable=False)
# Cached result of kumascript and other offline processors (if any)
rendered_html = models.TextField(editable=False, blank=True, null=True)
# Errors (if any) from the last rendering run
rendered_errors = models.TextField(editable=False, blank=True, null=True)
# Whether or not to automatically defer rendering of this page to a queued
# offline task. Generally used for complex pages that need time
defer_rendering = models.BooleanField(default=False, db_index=True)
# Timestamp when this document was last scheduled for a render
render_scheduled_at = models.DateTimeField(null=True, db_index=True)
# Timestamp when a render for this document was last started
render_started_at = models.DateTimeField(null=True, db_index=True)
# Timestamp when this document was last rendered
last_rendered_at = models.DateTimeField(null=True, db_index=True)
# Maximum age (in seconds) before this document needs re-rendering
render_max_age = models.IntegerField(blank=True, null=True)
# Time after which this document needs re-rendering
render_expires = models.DateTimeField(blank=True, null=True, db_index=True)
# Whether this page is deleted.
deleted = models.BooleanField(default=False, db_index=True)
# Last modified time for the document. Should be equal-to or greater than
# the current revision's created field
modified = models.DateTimeField(auto_now=True, null=True, db_index=True)
body_html = models.TextField(editable=False, blank=True, null=True)
quick_links_html = models.TextField(editable=False, blank=True, null=True)
zone_subnav_local_html = models.TextField(editable=False,
blank=True, null=True)
toc_html = models.TextField(editable=False, blank=True, null=True)
summary_html = models.TextField(editable=False, blank=True, null=True)
summary_text = models.TextField(editable=False, blank=True, null=True)
class Meta(object):
unique_together = (
('parent', 'locale'),
('slug', 'locale'),
)
permissions = (
('view_document', 'Can view document'),
('add_template_document', 'Can add Template:* document'),
('change_template_document', 'Can change Template:* document'),
('move_tree', 'Can move a tree of documents'),
('purge_document', 'Can permanently delete document'),
('restore_document', 'Can restore deleted document'),
)
objects = DocumentManager()
deleted_objects = DeletedDocumentManager()
admin_objects = DocumentAdminManager()
def __unicode__(self):
return u'%s (%s)' % (self.get_absolute_url(), self.title)
@cache_with_field('body_html')
def get_body_html(self, *args, **kwargs):
html = self.rendered_html and self.rendered_html or self.html
sections_to_hide = ('Quick_Links', 'Subnav')
doc = parse_content(html)
for sid in sections_to_hide:
doc = doc.replaceSection(sid, '<!-- -->')
doc.injectSectionIDs()
doc.annotateLinks(base_url=settings.SITE_URL)
return doc.serialize()
@cache_with_field('quick_links_html')
def get_quick_links_html(self, *args, **kwargs):
return self.get_section_content('Quick_Links')
@cache_with_field('zone_subnav_local_html')
def get_zone_subnav_local_html(self, *args, **kwargs):
return self.get_section_content('Subnav')
@cache_with_field('toc_html')
def get_toc_html(self, *args, **kwargs):
if not self.current_revision:
return ''
toc_depth = self.current_revision.toc_depth
if not toc_depth:
return ''
html = self.rendered_html and self.rendered_html or self.html
return (parse_content(html)
.injectSectionIDs()
.filter(self.TOC_FILTERS[toc_depth])
.serialize())
@cache_with_field('summary_html')
def get_summary_html(self, *args, **kwargs):
return self.get_summary(strip_markup=False)
@cache_with_field('summary_text')
def get_summary_text(self, *args, **kwargs):
return self.get_summary(strip_markup=True)
def regenerate_cache_with_fields(self):
"""Regenerate fresh content for all the cached fields"""
# TODO: Maybe @cache_with_field can build a registry over which this
# method can iterate?
self.get_body_html(force_fresh=True)
self.get_quick_links_html(force_fresh=True)
self.get_zone_subnav_local_html(force_fresh=True)
self.get_toc_html(force_fresh=True)
self.get_summary_html(force_fresh=True)
self.get_summary_text(force_fresh=True)
def get_zone_subnav_html(self):
"""
Search from self up through DocumentZone stack, returning the first
zone nav HTML found.
"""
src = self.get_zone_subnav_local_html()
if src:
return src
for zone in DocumentZoneStackJob().get(self.pk):
src = zone.document.get_zone_subnav_local_html()
if src:
return src
def extract_section(self, content, section_id, ignore_heading=False):
parsed_content = parse_content(content)
extracted = parsed_content.extractSection(section_id,
ignore_heading=ignore_heading)
return extracted.serialize()
def get_section_content(self, section_id, ignore_heading=True):
"""
Convenience method to extract the rendered content for a single section
"""
if self.rendered_html:
content = self.rendered_html
else:
content = self.html
return self.extract_section(content, section_id, ignore_heading)
def calculate_etag(self, section_id=None):
"""Calculate an etag-suitable hash for document content or a section"""
if not section_id:
content = self.html
else:
content = self.extract_section(self.html, section_id)
return '"%s"' % hashlib.sha1(content.encode('utf8')).hexdigest()
def current_or_latest_revision(self):
"""Returns current revision if there is one, else the last created
revision."""
rev = self.current_revision
if not rev:
revs = self.revisions.order_by('-created')
if revs.exists():
rev = revs[0]
return rev
@property
def is_rendering_scheduled(self):
"""Does this have a rendering scheduled?"""
if not self.render_scheduled_at:
return False
# Check whether a scheduled rendering has waited for too long. Assume
# failure, in this case, and allow another scheduling attempt.
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = datetime.now() - self.render_scheduled_at
if duration > max_duration:
return False
if not self.last_rendered_at:
return True
return self.render_scheduled_at > self.last_rendered_at
@property
def is_rendering_in_progress(self):
"""Does this have a rendering in progress?"""
if not self.render_started_at:
# No start time, so False.
return False
# Check whether an in-progress rendering has gone on for too long.
# Assume failure, in this case, and allow another rendering attempt.
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = datetime.now() - self.render_started_at
if duration > max_duration:
return False
if not self.last_rendered_at:
# No rendering ever, so in progress.
return True
# Finally, if the render start is more recent than last completed
# render, then we have one in progress.
return self.render_started_at > self.last_rendered_at
@newrelic.agent.function_trace()
def get_rendered(self, cache_control=None, base_url=None):
"""Attempt to get rendered content for this document"""
# No rendered content yet, so schedule the first render.
if not self.rendered_html:
try:
self.schedule_rendering(cache_control, base_url)
except DocumentRenderingInProgress:
# Unable to trigger a rendering right now, so we bail.
raise DocumentRenderedContentNotAvailable
# If we have a cache_control directive, try scheduling a render.
if cache_control:
try:
self.schedule_rendering(cache_control, base_url)
except DocumentRenderingInProgress:
pass
# Parse JSON errors, if available.
errors = None
try:
errors = (self.rendered_errors and
json.loads(self.rendered_errors) or None)
except ValueError:
pass
# If the above resulted in an immediate render, we might have content.
if not self.rendered_html:
if errors:
return ('', errors)
else:
# But, no such luck, so bail out.
raise DocumentRenderedContentNotAvailable
return (self.rendered_html, errors)
def schedule_rendering(self, cache_control=None, base_url=None):
"""
Attempt to schedule rendering. Honor the defer_rendering field to
decide between an immediate or a queued render.
"""
# Avoid scheduling a rendering if already scheduled or in progress.
if self.is_rendering_scheduled or self.is_rendering_in_progress:
return False
# Note when the rendering was scheduled. Kind of a hack, doing a quick
# update and setting the local property rather than doing a save()
now = datetime.now()
Document.objects.filter(pk=self.pk).update(render_scheduled_at=now)
self.render_scheduled_at = now
if (waffle.switch_is_active('wiki_force_immediate_rendering') or
not self.defer_rendering):
# Attempt an immediate rendering.
self.render(cache_control, base_url)
else:
# Attempt to queue a rendering. If celery.conf.ALWAYS_EAGER is
# True, this is also an immediate rendering.
from . import tasks
tasks.render_document.delay(self.pk, cache_control, base_url)
def render(self, cache_control=None, base_url=None, timeout=None):
"""
Render content using kumascript and any other services necessary.
"""
if not base_url:
base_url = settings.SITE_URL
# Disallow rendering while another is in progress.
if self.is_rendering_in_progress:
raise DocumentRenderingInProgress
# Note when the rendering was started. Kind of a hack, doing a quick
# update and setting the local property rather than doing a save()
now = datetime.now()
Document.objects.filter(pk=self.pk).update(render_started_at=now)
self.render_started_at = now
# Perform rendering and update document
if not config.KUMASCRIPT_TIMEOUT:
# A timeout of 0 should shortcircuit kumascript usage.
self.rendered_html, self.rendered_errors = self.html, []
else:
self.rendered_html, errors = kumascript.get(self, cache_control,
base_url,
timeout=timeout)
self.rendered_errors = errors and json.dumps(errors) or None
# Regenerate the cached content fields
self.regenerate_cache_with_fields()
# Finally, note the end time of rendering and update the document.
self.last_rendered_at = datetime.now()
# If this rendering took longer than we'd like, mark it for deferred
# rendering in the future.
timeout = config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = self.last_rendered_at - self.render_started_at
if duration >= max_duration:
self.defer_rendering = True
# TODO: Automatically clear the defer_rendering flag if the rendering
# time falls under the limit? Probably safer to require manual
# intervention to free docs from deferred jail.
if self.render_max_age:
# If there's a render_max_age, automatically update render_expires
self.render_expires = (datetime.now() +
timedelta(seconds=self.render_max_age))
else:
# Otherwise, just clear the expiration time as a one-shot
self.render_expires = None
self.save()
render_done.send(sender=self.__class__, instance=self)
def get_summary(self, strip_markup=True, use_rendered=True):
"""
Attempt to get the document summary from rendered content, with
fallback to raw HTML
"""
if use_rendered and self.rendered_html:
src = self.rendered_html
else:
src = self.html
return get_seo_description(src, self.locale, strip_markup)
def build_json_data(self):
html = self.rendered_html and self.rendered_html or self.html
content = parse_content(html).injectSectionIDs().serialize()
sections = get_content_sections(content)
translations = []
if self.pk:
for translation in self.other_translations:
revision = translation.current_revision
if revision.summary:
summary = revision.summary
else:
summary = translation.get_summary(strip_markup=False)
translations.append({
'last_edit': revision.created.isoformat(),
'locale': translation.locale,
'localization_tags': list(revision.localization_tags
.names()),
'review_tags': list(revision.review_tags.names()),
'summary': summary,
'tags': list(translation.tags.names()),
'title': translation.title,
'url': translation.get_absolute_url(),
})
if self.current_revision:
review_tags = list(self.current_revision.review_tags.names())
localization_tags = list(self.current_revision
.localization_tags
.names())
last_edit = self.current_revision.created.isoformat()
if self.current_revision.summary:
summary = self.current_revision.summary
else:
summary = self.get_summary(strip_markup=False)
else:
review_tags = []
localization_tags = []
last_edit = ''
summary = ''
if not self.pk:
tags = []
else:
tags = list(self.tags.names())
now_iso = datetime.now().isoformat()
if self.modified:
modified = self.modified.isoformat()
else:
modified = now_iso
return {
'title': self.title,
'label': self.title,
'url': self.get_absolute_url(),
'id': self.id,
'slug': self.slug,
'tags': tags,
'review_tags': review_tags,
'localization_tags': localization_tags,
'sections': sections,
'locale': self.locale,
'summary': summary,
'translations': translations,
'modified': modified,
'json_modified': now_iso,
'last_edit': last_edit
}
def get_json_data(self, stale=True):
"""Returns a document in object format for output as JSON.
The stale parameter, when True, accepts stale cached data even after
the document has been modified."""
# Have parsed data & don't care about freshness? Here's a quick out..
curr_json_data = getattr(self, '_json_data', None)
if curr_json_data and stale:
return curr_json_data
# Attempt to parse the current contents of self.json, taking care in
# case it's empty or broken JSON.
self._json_data = {}
if self.json:
try:
self._json_data = json.loads(self.json)
except (TypeError, ValueError):
pass
# Try to get ISO 8601 datestamps for the doc and the json
json_lmod = self._json_data.get('json_modified', '')
doc_lmod = self.modified.isoformat()
# If there's no parsed data or the data is stale & we care, it's time
# to rebuild the cached JSON data.
if (not self._json_data) or (not stale and doc_lmod > json_lmod):
self._json_data = self.build_json_data()
self.json = json.dumps(self._json_data)
Document.objects.filter(pk=self.pk).update(json=self.json)
return self._json_data
def extract_code_sample(self, id):
"""Given the id of a code sample, attempt to extract it from rendered
HTML with a fallback to non-rendered in case of errors."""
try:
src, errors = self.get_rendered()
if errors:
src = self.html
except:
src = self.html
return extract_code_sample(id, src)
def extract_kumascript_macro_names(self):
return extract_kumascript_macro_names(self.html)
def extract_css_classnames(self):
return extract_css_classnames(self.rendered_html)
def extract_html_attributes(self):
return extract_html_attributes(self.rendered_html)
def natural_key(self):
return (self.locale, self.slug)
@staticmethod
def natural_key_hash(keys):
natural_key = u'/'.join(keys)
return hashlib.md5(natural_key.encode('utf8')).hexdigest()
@cached_property
def natural_cache_key(self):
return self.natural_key_hash(self.natural_key())
def _existing(self, attr, value):
"""Return an existing doc (if any) in this locale whose `attr` attr is
equal to mine."""
return Document.objects.filter(locale=self.locale, **{attr: value})
def _raise_if_collides(self, attr, exception):
"""Raise an exception if a page of this title/slug already exists."""
if self.id is None or hasattr(self, 'old_' + attr):
# If I am new or my title/slug changed...
existing = self._existing(attr, getattr(self, attr))
if existing.exists():
raise exception(existing[0])
def clean(self):
"""Translations can't be localizable."""
self._clean_is_localizable()
def _clean_is_localizable(self):
"""is_localizable == allowed to have translations. Make sure that isn't
violated.
For default language (en-US), is_localizable means it can have
translations. Enforce:
* is_localizable=True if it has translations
* if it has translations, is_localizable cannot be set to False
For non-default languages, is_localizable must be False.
"""
if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
self.is_localizable = False
# Can't save this translation if parent not localizable
if (self.parent and self.parent.id != self.id and
not self.parent.is_localizable):
raise ValidationError('"%s": parent "%s" is not localizable.' % (
unicode(self), unicode(self.parent)))
# Can't make not localizable if it has translations
# This only applies to documents that already exist, hence self.pk
if self.pk and not self.is_localizable and self.translations.exists():
raise ValidationError('"%s": document has %s translations but is '
'not localizable.' %
(unicode(self), self.translations.count()))
def _attr_for_redirect(self, attr, template):
"""Return the slug or title for a new redirect.
`template` is a Python string template with "old" and "number" tokens
used to create the variant.
"""
def unique_attr():
"""Return a variant of getattr(self, attr) such that there is no
Document of my locale with string attribute `attr` equal to it.
Never returns the original attr value.
"""
# "My God, it's full of race conditions!"
i = 1
while True:
new_value = template % dict(old=getattr(self, attr), number=i)
if not self._existing(attr, new_value).exists():
return new_value
i += 1
old_attr = 'old_' + attr
if hasattr(self, old_attr):
# My slug (or title) is changing; we can reuse it for the redirect.
return getattr(self, old_attr)
else:
# Come up with a unique slug (or title):
return unique_attr()
def revert(self, revision, user, comment=None):
old_review_tags = list(revision.review_tags.names())
revision.id = None
revision.comment = ("Revert to revision of %s by %s" %
(revision.created, revision.creator))
if comment:
revision.comment += ': "%s"' % comment
revision.created = datetime.now()
revision.creator = user
revision.save()
# TODO: change to primary key check instead of object comparison
if revision.document.original == self:
revision.save(update_fields=['based_on'])
if old_review_tags:
revision.review_tags.set(*old_review_tags)
revision.make_current()
self.schedule_rendering('max-age=0')
return revision
def revise(self, user, data, section_id=None):
"""Given a dict of changes to make, build and save a new Revision to
revise this document"""
curr_rev = self.current_revision
new_rev = Revision(creator=user, document=self, content=self.html)
for n in ('title', 'slug', 'render_max_age'):
setattr(new_rev, n, getattr(self, n))
if curr_rev:
new_rev.toc_depth = curr_rev.toc_depth
original_doc = curr_rev.document.original
if original_doc == self:
new_rev.based_on = curr_rev
else:
new_rev.based_on = original_doc.current_revision
# Accept optional field edits...
new_title = data.get('title', False)
new_rev.title = new_title and new_title or self.title
new_tags = data.get('tags', False)
new_rev.tags = (new_tags and new_tags or
edit_string_for_tags(self.tags.all()))
new_review_tags = data.get('review_tags', False)
if new_review_tags:
review_tags = new_review_tags
elif curr_rev:
review_tags = edit_string_for_tags(curr_rev.review_tags.all())
else:
review_tags = ''
new_rev.summary = data.get('summary', '')
# To add comment, when Technical/Editorial review completed
new_rev.comment = data.get('comment', '')
# Accept HTML edits, optionally by section
new_html = data.get('content', data.get('html', False))
if new_html:
if not section_id:
new_rev.content = new_html
else:
content = parse_content(self.html)
new_rev.content = (content.replaceSection(section_id, new_html)
.serialize())
# Finally, commit the revision changes and return the new rev.
new_rev.save()
new_rev.review_tags.set(*parse_tags(review_tags))
return new_rev
@cached_property
def last_modified_cache_key(self):
return DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % self.natural_cache_key
def fill_last_modified_cache(self):
"""
Convert python datetime to Unix epoch seconds. This is more
easily digested by the cache, and is more compatible with other
services that might spy on Kuma's cache entries (eg. KumaScript)
"""
modified_epoch = self.modified.strftime('%s')
memcache.set(self.last_modified_cache_key, modified_epoch)
return modified_epoch
def save(self, *args, **kwargs):
self.is_template = self.slug.startswith(TEMPLATE_TITLE_PREFIX)
self.is_redirect = bool(self.get_redirect_url())
try:
# Check if the slug would collide with an existing doc
self._raise_if_collides('slug', SlugCollision)
except UniqueCollision as err:
if err.existing.get_redirect_url() is not None:
# If the existing doc is a redirect, delete it and clobber it.
err.existing.delete()
else:
raise err
# These are too important to leave to a (possibly omitted) is_valid
# call:
self._clean_is_localizable()
if not self.parent_topic and self.parent:
# If this is a translation without a topic parent, try to get one.
self.acquire_translated_topic_parent()
super(Document, self).save(*args, **kwargs)
# Delete any cached last-modified timestamp.
self.fill_last_modified_cache()
def delete(self, *args, **kwargs):
if waffle.switch_is_active('wiki_error_on_delete'):
# bug 863692: Temporary while we investigate disappearing pages.
raise Exception("Attempt to delete document %s: %s" %
(self.id, self.title))
else:
if self.is_redirect or 'purge' in kwargs:
if 'purge' in kwargs:
kwargs.pop('purge')
return super(Document, self).delete(*args, **kwargs)
signals.pre_delete.send(sender=self.__class__,
instance=self)
if not self.deleted:
Document.objects.filter(pk=self.pk).update(deleted=True)
memcache.delete(self.last_modified_cache_key)
signals.post_delete.send(sender=self.__class__, instance=self)
def purge(self):
if waffle.switch_is_active('wiki_error_on_delete'):
# bug 863692: Temporary while we investigate disappearing pages.
raise Exception("Attempt to purge document %s: %s" %
(self.id, self.title))
else:
if not self.deleted:
raise Exception("Attempt tp purge non-deleted document %s: %s" %
(self.id, self.title))
self.delete(purge=True)
def restore(self):
"""
Restores a logically deleted document by reverting the deleted
boolean to False. Sends the pre_save and post_save Django signals so
that listeners see the restore like a regular save.
"""
if not self.deleted:
raise Exception("Document is not deleted, cannot be restored.")
signals.pre_save.send(sender=self.__class__, instance=self)
Document.deleted_objects.filter(pk=self.pk).update(deleted=False)
signals.post_save.send(sender=self.__class__, instance=self)
def _post_move_redirects(self, new_slug, user, title):
"""
Create and return a Document and a Revision to serve as
redirects once this page has been moved.
"""
redirect_doc = Document(locale=self.locale,
title=self.title,
slug=self.slug,
is_localizable=False)
content = REDIRECT_CONTENT % {
'href': reverse('wiki.document',
args=[new_slug],
locale=self.locale),
'title': title,
}
redirect_rev = Revision(content=content,
is_approved=True,
toc_depth=self.current_revision.toc_depth,
creator=user)
return redirect_doc, redirect_rev
def _moved_revision(self, new_slug, user, title=None):
"""
Create and return a Revision which is a copy of this
Document's current Revision, as it will exist at a moved
location.
"""
moved_rev = self.current_revision
# Shortcut trick for getting an object with all the same
# values, but making Django think it's new.
moved_rev.id = None
moved_rev.creator = user
moved_rev.created = datetime.now()
moved_rev.slug = new_slug
if title:
moved_rev.title = title
return moved_rev
def _get_new_parent(self, new_slug):
"""
Get this moved Document's parent doc if a Document
exists at the appropriate slug and locale.
"""
return valid_slug_parent(new_slug, self.locale)
def _move_conflicts(self, new_slug):
"""
Given a new slug to be assigned to this document, check
whether there is an existing, non-redirect, Document at that
slug in this locale. Any redirect existing there will be
deleted.
This is necessary since page moving is a background task, and
a Document may come into existence at the target slug after
the move is requested.
"""
existing = None
try:
existing = Document.objects.get(locale=self.locale,
slug=new_slug)
except Document.DoesNotExist:
pass
if existing is not None:
if existing.is_redirect:
existing.delete()
else:
raise Exception("Requested move would overwrite a non-redirect page.")
def _tree_conflicts(self, new_slug):
"""
Given a new slug to be assigned to this document, return a
list of documents (if any) which would be overwritten by
moving this document or any of its children in that fashion.
"""
conflicts = []
try:
existing = Document.objects.get(locale=self.locale, slug=new_slug)
if not existing.is_redirect:
conflicts.append(existing)
except Document.DoesNotExist:
pass
for child in self.get_descendants():
child_title = child.slug.split('/')[-1]
try:
slug = '/'.join([new_slug, child_title])
existing = Document.objects.get(locale=self.locale, slug=slug)
if not existing.get_redirect_url():
conflicts.append(existing)
except Document.DoesNotExist:
pass
return conflicts
def _move_tree(self, new_slug, user=None, title=None):
"""
Move this page and all its children.
"""
# Page move is a 10-step process.
#
# Step 1: Sanity check. Has a page been created at this slug
# since the move was requested? If not, OK to go ahead and
# change our slug.
self._move_conflicts(new_slug)
if user is None:
user = self.current_revision.creator
if title is None:
title = self.title
# Step 2: stash our current review tags, since we want to
# preserve them.
review_tags = list(self.current_revision.review_tags.names())
# Step 3: Create (but don't yet save) a Document and Revision
# to leave behind as a redirect from old location to new.
redirect_doc, redirect_rev = self._post_move_redirects(new_slug,
user,
title)
# Step 4: Update our breadcrumbs.
new_parent = self._get_new_parent(new_slug)
# If we found a Document at what will be our parent slug, set
# it as our parent_topic. If we didn't find one, then we no
# longer have a parent_topic (since our original parent_topic
# would already have moved if it were going to).
self.parent_topic = new_parent
# Step 5: Save this Document.
self.slug = new_slug
self.save()
# Step 6: Create (but don't yet save) a copy of our current
# revision, but with the new slug and title (if title is
# changing too).
moved_rev = self._moved_revision(new_slug, user, title)
# Step 7: Save the Revision that actually moves us.
moved_rev.save(force_insert=True)
# Step 8: Save the review tags.
moved_rev.review_tags.set(*review_tags)
# Step 9: Save the redirect.
redirect_doc.save()
redirect_rev.document = redirect_doc
redirect_rev.save()
# Finally, step 10: recurse through all of our children.
for child in self.children.filter(locale=self.locale):
# Save the original slug and locale so we can use them in
# the error message if something goes wrong.
old_child_slug, old_child_locale = child.slug, child.locale
child_title = child.slug.split('/')[-1]
try:
child._move_tree('/'.join([new_slug, child_title]), user)
except PageMoveError:
# A child move already caught this and created the
# correct exception + error message, so just propagate
# it up.
raise
except Exception as e:
# One of the immediate children of this page failed to
# move.
exc_class, exc_message, exc_tb = sys.exc_info()
message = """
Failure occurred while attempting to move document
with id %(doc_id)s.
That document can be viewed at:
https://developer.mozilla.org/%(locale)s/docs/%(slug)s
The exception raised was:
Exception type: %(exc_class)s
Exception message: %(exc_message)s
Full traceback:
%(traceback)s
""" % {'doc_id': child.id,
'locale': old_child_locale,
'slug': old_child_slug,
'exc_class': exc_class,
'exc_message': exc_message,
'traceback': traceback.format_exc(e)}
raise PageMoveError(message)
def repair_breadcrumbs(self):
"""
Temporary method while we work out the real issue behind
translation/breadcrumb mismatches (bug 900961).
Basically just walks up the tree of topical parents, calling
acquire_translated_topic_parent() for as long as there's a
language mismatch.
"""
if (not self.parent_topic or
self.parent_topic.locale != self.locale):
self.acquire_translated_topic_parent()
if self.parent_topic:
self.parent_topic.repair_breadcrumbs()
def acquire_translated_topic_parent(self):
"""
This normalizes topic breadcrumb paths between locales.
Attempt to acquire a topic parent from a translation of our translation
parent's topic parent, auto-creating a stub document if necessary.
"""
if not self.parent:
# Bail, if this is not in fact a translation.
return
parent_topic = self.parent.parent_topic
if not parent_topic:
# Bail, if the translation parent has no topic parent
return
try:
# Look for an existing translation of the topic parent
new_parent = parent_topic.translations.get(locale=self.locale)
except Document.DoesNotExist:
try:
# No luck. As a longshot, let's try looking for the same slug.
new_parent = Document.objects.get(locale=self.locale,
slug=parent_topic.slug)
if not new_parent.parent:
# HACK: This same-slug/different-locale doc should probably
# be considered a translation. Let's correct that on the
# spot.
new_parent.parent = parent_topic
new_parent.save()
except Document.DoesNotExist:
# Finally, let's create a translated stub for a topic parent
new_parent = Document.objects.get(pk=parent_topic.pk)
new_parent.pk = None
new_parent.current_revision = None
new_parent.parent_topic = None
new_parent.parent = parent_topic
new_parent.locale = self.locale
new_parent.save()
if parent_topic.current_revision:
# Don't forget to clone a current revision
new_rev = Revision.objects.get(pk=parent_topic.current_revision.pk)
new_rev.pk = None
new_rev.document = new_parent
# HACK: Let's auto-add tags that flag this as a topic stub
stub_tags = '"TopicStub","NeedsTranslation"'
stub_l10n_tags = ['inprogress']
if new_rev.tags:
new_rev.tags = '%s,%s' % (new_rev.tags, stub_tags)
else:
new_rev.tags = stub_tags
new_rev.save()
new_rev.localization_tags.add(*stub_l10n_tags)
# Finally, assign the new default parent topic
self.parent_topic = new_parent
self.save()
@property
def content_parsed(self):
if not self.current_revision:
return None
return self.current_revision.content_parsed
def files_dict(self):
intermediates = DocumentAttachment.objects.filter(document__pk=self.id)
files = {}
for intermediate in intermediates:
attachment = intermediate.file
revision = attachment.current_revision
files[intermediate.name] = {
'attached_by': intermediate.attached_by.username,
'creator': revision.creator.username,
'description': revision.description,
'mime_type': revision.mime_type,
'html': attachment.get_embed_html(),
'url': attachment.get_file_url(),
}
return files
@cached_property
def attachments(self):
# Is there a more elegant way to do this?
#
# File attachments aren't really stored at the DB level;
# instead, the page just gets appropriate HTML to embed
# whatever type of file it is. So we find them by
# regex-searching over the HTML for URLs that match the
# file URL patterns.
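        #
        # Illustrative sketch (not in the original source): given hypothetical
        # page HTML like '<img src="/files/1234/diagram.png">', and assuming
        # KUMA_FILE_URL captures the numeric id out of "/files/<id>/..." URLs
        # (check the constant's definition for the actual pattern),
        # KUMA_FILE_URL.findall(self.html) would yield ['1234'] and the query
        # built below reduces to Attachment.objects.filter(id__in=['1234']).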
mt_files = DEKI_FILE_URL.findall(self.html)
kuma_files = KUMA_FILE_URL.findall(self.html)
params = None
if mt_files:
# We have at least some MindTouch files.
params = models.Q(mindtouch_attachment_id__in=mt_files)
            if kuma_files:
                # We also have some kuma files. Use an OR query.
                params = params | models.Q(id__in=kuma_files)
if kuma_files and not params:
# We have only kuma files.
params = models.Q(id__in=kuma_files)
if params:
return Attachment.objects.filter(params)
else:
# If no files found, return an empty Attachment queryset.
return Attachment.objects.none()
@property
def show_toc(self):
return self.current_revision and self.current_revision.toc_depth
@cached_property
def language(self):
return get_language_mapping()[self.locale.lower()]
def get_absolute_url(self, endpoint='wiki.document'):
"""
Build the absolute URL to this document from its full path
"""
return reverse(endpoint, locale=self.locale, args=[self.slug])
def get_edit_url(self):
return self.get_absolute_url(endpoint='wiki.edit')
def get_redirect_url(self):
"""
If I am a redirect, return the absolute URL to which I redirect.
Otherwise, return None.
"""
# If a document starts with REDIRECT_HTML and contains any <a> tags
# with hrefs, return the href of the first one. This trick saves us
# from having to parse the HTML every time.
if REDIRECT_HTML in self.html:
anchors = PyQuery(self.html)('a[href].redirect')
if anchors:
url = anchors[0].get('href')
# allow explicit domain and *not* '//'
# i.e allow "https://developer...." and "/en-US/docs/blah"
if len(url) > 1:
if url.startswith(settings.SITE_URL):
return url
elif url[0] == '/' and url[1] != '/':
return url
elif len(url) == 1 and url[0] == '/':
return url
def filter_permissions(self, user, permissions):
"""Filter permissions with custom logic"""
# No-op, for now.
return permissions
def get_topic_parents(self):
"""Build a list of parent topics from self to root"""
curr, parents = self, []
while curr.parent_topic:
curr = curr.parent_topic
parents.append(curr)
return parents
def allows_revision_by(self, user):
"""
Return whether `user` is allowed to create new revisions of me.
The motivation behind this method is that templates and other types of
docs may have different permissions.
"""
if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.change_template_document')):
return False
return True
def allows_editing_by(self, user):
"""
Return whether `user` is allowed to edit document-level metadata.
If the Document doesn't have a current_revision (nothing approved) then
all the Document fields are still editable. Once there is an approved
Revision, the Document fields can only be edited by privileged users.
"""
if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.change_template_document')):
return False
return (not self.current_revision or
user.has_perm('wiki.change_document'))
def translated_to(self, locale):
"""
Return the translation of me to the given locale.
If there is no such Document, return None.
"""
if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
            raise NotImplementedError('translated_to() is implemented only on '
                                      'Documents in the default language so '
                                      'far.')
try:
return Document.objects.get(locale=locale, parent=self)
except Document.DoesNotExist:
return None
@property
def original(self):
"""Return the document I was translated from or, if none, myself."""
return self.parent or self
@cached_property
def other_translations(self):
"""Return a list of Documents - other translations of this Document"""
if self.parent is None:
return self.translations.all().order_by('locale')
else:
translations = (self.parent.translations.all()
.exclude(id=self.id)
.order_by('locale'))
pks = list(translations.values_list('pk', flat=True))
return Document.objects.filter(pk__in=[self.parent.pk] + pks)
@property
def parents(self):
"""Return the list of topical parent documents above this one,
or an empty list if none exist."""
if self.parent_topic is None:
return []
current_parent = self.parent_topic
parents = [current_parent]
while current_parent.parent_topic is not None:
parents.insert(0, current_parent.parent_topic)
current_parent = current_parent.parent_topic
return parents
def is_child_of(self, other):
"""
Circular dependency detection -- if someone tries to set
this as a parent of a document it's a child of, they're gonna
have a bad time.
"""
return other.id in (d.id for d in self.parents)
# This is a method, not a property, because it can do a lot of DB
# queries and so should look scarier. It's not just named
# 'children' because that's taken already by the reverse relation
# on parent_topic.
def get_descendants(self, limit=None, levels=0):
"""
Return a list of all documents which are children
(grandchildren, great-grandchildren, etc.) of this one.
"""
results = []
if (limit is None or levels < limit) and self.children.exists():
for child in self.children.all().filter(locale=self.locale):
results.append(child)
                results.extend(child.get_descendants(limit, levels + 1))
return results
def is_watched_by(self, user):
"""Return whether `user` is notified of edits to me."""
from .events import EditDocumentEvent
return EditDocumentEvent.is_notifying(user, self)
def tree_is_watched_by(self, user):
"""Return whether `user` is notified of edits to me AND sub-pages."""
from .events import EditDocumentInTreeEvent
return EditDocumentInTreeEvent.is_notifying(user, self)
def parent_trees_watched_by(self, user):
"""
Return any and all of this document's parents that are watched by the
given user.
"""
return [doc for doc in self.parents if doc.tree_is_watched_by(user)]
def get_document_type(self):
return WikiDocumentType
@cached_property
def contributors(self):
return DocumentContributorsJob().get(self.pk)
@cached_property
def zone_stack(self):
return DocumentZoneStackJob().get(self.pk)
def get_full_url(self):
return absolutify(self.get_absolute_url())
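# Illustrative sketch (not part of the original module): the intended call
# pattern for a tree move, normally executed from a background task. The helper
# name and its arguments are hypothetical; it only strings together methods
# defined on Document above.
def _example_move_document_tree(locale, old_slug, new_slug, user):
    """Check for conflicts, then move the document tree rooted at old_slug,
    leaving redirects behind at the old locations."""
    doc = Document.objects.get(locale=locale, slug=old_slug)
    conflicts = doc._tree_conflicts(new_slug)
    if conflicts:
        raise PageMoveError("Move would overwrite: %s" %
                            ", ".join(d.slug for d in conflicts))
    doc._move_tree(new_slug, user=user)
    return doc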
class DocumentDeletionLog(models.Model):
"""
Log of who deleted a Document, when, and why.
"""
# We store the locale/slug because it's unique, and also because a
# ForeignKey would delete this log when the Document gets purged.
locale = models.CharField(
max_length=7,
choices=settings.LANGUAGES,
default=settings.WIKI_DEFAULT_LANGUAGE,
db_index=True,
)
slug = models.CharField(max_length=255, db_index=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
timestamp = models.DateTimeField(auto_now=True)
reason = models.TextField()
def __unicode__(self):
return "/%(locale)s/%(slug)s deleted by %(user)s" % {
'locale': self.locale,
'slug': self.slug,
'user': self.user
}
class DocumentZone(models.Model):
"""
Model object declaring a content zone root at a given Document, provides
attributes inherited by the topic hierarchy beneath it.
"""
document = models.OneToOneField(Document, related_name='zone')
styles = models.TextField(null=True, blank=True)
url_root = models.CharField(
max_length=255, null=True, blank=True, db_index=True,
help_text="alternative URL path root for documents under this zone")
def __unicode__(self):
return u'DocumentZone %s (%s)' % (self.document.get_absolute_url(),
self.document.title)
class ReviewTag(TagBase):
"""A tag indicating review status, mainly for revisions"""
class Meta:
verbose_name = _('Review Tag')
verbose_name_plural = _('Review Tags')
class LocalizationTag(TagBase):
"""A tag indicating localization status, mainly for revisions"""
class Meta:
verbose_name = _('Localization Tag')
verbose_name_plural = _('Localization Tags')
class ReviewTaggedRevision(ItemBase):
"""Through model, just for review tags on revisions"""
content_object = models.ForeignKey('Revision')
tag = models.ForeignKey(ReviewTag, related_name="%(app_label)s_%(class)s_items")
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class LocalizationTaggedRevision(ItemBase):
"""Through model, just for localization tags on revisions"""
content_object = models.ForeignKey('Revision')
tag = models.ForeignKey(LocalizationTag, related_name="%(app_label)s_%(class)s_items")
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class Revision(models.Model):
"""A revision of a localized knowledgebase document"""
# Depth of table-of-contents in document display.
TOC_DEPTH_NONE = 0
TOC_DEPTH_ALL = 1
TOC_DEPTH_H2 = 2
TOC_DEPTH_H3 = 3
TOC_DEPTH_H4 = 4
TOC_DEPTH_CHOICES = (
(TOC_DEPTH_NONE, _(u'No table of contents')),
(TOC_DEPTH_ALL, _(u'All levels')),
(TOC_DEPTH_H2, _(u'H2 and higher')),
(TOC_DEPTH_H3, _(u'H3 and higher')),
(TOC_DEPTH_H4, _('H4 and higher')),
)
document = models.ForeignKey(Document, related_name='revisions')
# Title and slug in document are primary, but they're kept here for
# revision history.
title = models.CharField(max_length=255, null=True, db_index=True)
slug = models.CharField(max_length=255, null=True, db_index=True)
summary = models.TextField() # wiki markup
content = models.TextField() # wiki markup
tidied_content = models.TextField(blank=True) # wiki markup tidied up
# Keywords are used mostly to affect search rankings. Moderators may not
# have the language expertise to translate keywords, so we put them in the
# Revision so the translators can handle them:
keywords = models.CharField(max_length=255, blank=True)
# Tags are stored in a Revision as a plain CharField, because Revisions are
# not indexed by tags. This data is retained for history tracking.
tags = models.CharField(max_length=255, blank=True)
# Tags are (ab)used as status flags and for searches, but the through model
# should constrain things from getting expensive.
review_tags = TaggableManager(through=ReviewTaggedRevision)
localization_tags = TaggableManager(through=LocalizationTaggedRevision)
toc_depth = models.IntegerField(choices=TOC_DEPTH_CHOICES,
default=TOC_DEPTH_ALL)
# Maximum age (in seconds) before this document needs re-rendering
render_max_age = models.IntegerField(blank=True, null=True)
created = models.DateTimeField(default=datetime.now, db_index=True)
comment = models.CharField(max_length=255)
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='created_revisions')
is_approved = models.BooleanField(default=True, db_index=True)
# The default locale's rev that was current when the Edit button was hit to
# create this revision. Used to determine whether localizations are out of
# date.
based_on = models.ForeignKey('self', null=True, blank=True)
# TODO: limit_choices_to={'document__locale':
# settings.WIKI_DEFAULT_LANGUAGE} is a start but not sufficient.
is_mindtouch_migration = models.BooleanField(default=False, db_index=True,
help_text="Did this revision come from MindTouch?")
objects = TransformManager()
def get_absolute_url(self):
"""Build the absolute URL to this revision"""
return reverse('wiki.revision',
locale=self.document.locale,
args=[self.document.slug, self.pk])
def _based_on_is_clean(self):
"""Return a tuple: (the correct value of based_on, whether the old
value was correct).
based_on must be an approved revision of the English version of the
document if there are any such revisions, any revision if no
approved revision exists, and None otherwise. If based_on is not
already set when this is called, the return value defaults to the
current_revision of the English document.
"""
# TODO(james): This could probably be simplified down to "if
# based_on is set, it must be a revision of the original document."
original = self.document.original
base = original.current_or_latest_revision()
has_approved = original.revisions.filter(is_approved=True).exists()
if (original.current_revision or not has_approved):
if (self.based_on and self.based_on.document != original):
# based_on is set and points to the wrong doc.
return base, False
# Else based_on is valid; leave it alone.
elif self.based_on:
return None, False
return self.based_on, True
def clean(self):
"""Ensure based_on is valid."""
# All of the cleaning herein should be unnecessary unless the user
# messes with hidden form data.
try:
self.document and self.document.original
except Document.DoesNotExist:
# For clean()ing forms that don't have a document instance behind
# them yet
self.based_on = None
else:
based_on, is_clean = self._based_on_is_clean()
if not is_clean:
if self.document.parent:
# Restoring translation source, so base on current_revision
self.based_on = self.document.parent.current_revision
else:
old = self.based_on
self.based_on = based_on # Guess a correct value.
locale = settings.LOCALES[settings.WIKI_DEFAULT_LANGUAGE].native
error = ugettext(
'A revision must be based on a revision of the '
'%(locale)s document. Revision ID %(id)s does '
'not fit those criteria.')
raise ValidationError(error %
{'locale': locale, 'id': old.id})
def save(self, *args, **kwargs):
_, is_clean = self._based_on_is_clean()
if not is_clean: # No more Mister Nice Guy
# TODO(erik): This error message ignores non-translations.
raise ProgrammingError('Revision.based_on must be None or refer '
'to a revision of the default-'
'language document. It was %s' %
self.based_on)
if not self.title:
self.title = self.document.title
if not self.slug:
self.slug = self.document.slug
super(Revision, self).save(*args, **kwargs)
# When a revision is approved, update document metadata and re-cache
# the document's html content
if self.is_approved:
self.make_current()
def make_current(self):
"""Make this revision the current one for the document"""
self.document.title = self.title
self.document.slug = self.slug
self.document.html = self.content_cleaned
self.document.render_max_age = self.render_max_age
self.document.current_revision = self
# Since Revision stores tags as a string, we need to parse them first
# before setting on the Document.
self.document.tags.set(*parse_tags(self.tags))
self.document.save()
def __unicode__(self):
return u'[%s] %s #%s' % (self.document.locale,
self.document.title,
self.id)
def get_section_content(self, section_id):
"""Convenience method to extract the content for a single section"""
return self.document.extract_section(self.content, section_id)
def get_tidied_content(self, allow_none=False):
"""
Return the revision content parsed and cleaned by tidy.
First, check in denormalized db field. If it's not available, schedule
an asynchronous task to store it.
allow_none -- To prevent CPU-hogging calls, return None instead of
calling tidy_content in-process.
"""
# we may be lucky and have the tidied content already denormalized
# in the database, if so return it
if self.tidied_content:
tidied_content = self.tidied_content
else:
from .tasks import tidy_revision_content
tidying_scheduled_cache_key = 'kuma:tidying_scheduled:%s' % self.pk
# if there isn't already a task scheduled for the revision
tidying_already_scheduled = memcache.get(tidying_scheduled_cache_key)
if not tidying_already_scheduled:
tidy_revision_content.delay(self.pk)
# we temporarily set a flag that we've scheduled a task
# already and don't need to schedule it the next time
# we use 3 days as a limit to try it again
memcache.set(tidying_scheduled_cache_key, 1, 60 * 60 * 24 * 3)
if allow_none:
tidied_content = None
else:
tidied_content, errors = tidy_content(self.content)
return tidied_content
@property
def content_cleaned(self):
if self.document.is_template:
return self.content
else:
return Document.objects.clean_content(self.content)
@cached_property
def previous(self):
"""
Returns the previous approved revision or None.
"""
try:
return self.document.revisions.filter(
is_approved=True,
created__lt=self.created,
).order_by('-created')[0]
except IndexError:
return None
@cached_property
def needs_editorial_review(self):
return self.review_tags.filter(name='editorial').exists()
@cached_property
def needs_technical_review(self):
return self.review_tags.filter(name='technical').exists()
@cached_property
def localization_in_progress(self):
return self.localization_tags.filter(name='inprogress').exists()
@property
def translation_age(self):
return abs((datetime.now() - self.created).days)
class RevisionIP(models.Model):
"""
IP Address for a Revision including User-Agent string and Referrer URL.
"""
revision = models.ForeignKey(
Revision
)
ip = models.CharField(
_('IP address'),
max_length=40,
editable=False,
db_index=True,
blank=True,
null=True,
)
user_agent = models.TextField(
_('User-Agent'),
editable=False,
blank=True,
)
referrer = models.TextField(
_('HTTP Referrer'),
editable=False,
blank=True,
)
objects = RevisionIPManager()
def __unicode__(self):
return '%s (revision %d)' % (self.ip or 'No IP', self.revision.id)
class RevisionAkismetSubmission(AkismetSubmission):
"""
The Akismet submission per wiki document revision.
Stores only a reference to the submitted revision.
"""
revision = models.ForeignKey(
Revision,
related_name='akismet_submissions',
null=True,
blank=True,
verbose_name=_('Revision'),
# don't delete the akismet submission but set the revision to null
on_delete=models.SET_NULL,
)
class Meta:
verbose_name = _('Akismet submission')
verbose_name_plural = _('Akismet submissions')
def __unicode__(self):
if self.revision:
return (
u'%(type)s submission by %(sender)s (Revision %(revision_id)d)' % {
'type': self.get_type_display(),
'sender': self.sender,
'revision_id': self.revision.id,
}
)
else:
return (
u'%(type)s submission by %(sender)s (no revision)' % {
'type': self.get_type_display(),
'sender': self.sender,
}
)
class EditorToolbar(models.Model):
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='created_toolbars')
default = models.BooleanField(default=False)
name = models.CharField(max_length=100)
code = models.TextField(max_length=2000)
def __unicode__(self):
return self.name
class DocumentSpamAttempt(SpamAttempt):
"""
The wiki document specific spam attempt.
    Stores title, slug and locale of the document revision to be able
to see where it happens.
"""
title = models.CharField(
verbose_name=ugettext('Title'),
max_length=255,
)
slug = models.CharField(
verbose_name=ugettext('Slug'),
max_length=255,
)
document = models.ForeignKey(
Document,
related_name='spam_attempts',
null=True,
blank=True,
verbose_name=ugettext('Document (optional)'),
on_delete=models.SET_NULL,
)
def __unicode__(self):
return u'%s (%s)' % (self.slug, self.title)
|
mpl-2.0
| -1,674,403,241,280,517,600
| 37.414911
| 100
| 0.59919
| false
| 4.338377
| false
| false
| false
|
ahknight/fig-django
|
app/test_app/settings.py
|
1
|
2113
|
"""
Django settings for test_app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z#cqgk_fe7!3y8w2*f!@gcc_z5&ir-)p)_vxfjhf$9jwrxf)dt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_app.urls'
WSGI_APPLICATION = 'test_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/app/static/'
|
bsd-3-clause
| 2,028,688,857,110,303,200
| 23.569767
| 71
| 0.716517
| false
| 3.281056
| false
| false
| false
|
dashmoment/moxa_ai_training
|
CNN_HW_solution/model_zoo.py
|
1
|
4589
|
import tensorflow as tf
import netfactory as nf
import numpy as np
class model_zoo:
def __init__(self, inputs, dropout, is_training, model_ticket):
self.model_ticket = model_ticket
self.inputs = inputs
self.dropout = dropout
self.is_training = is_training
def googleLeNet_v1(self):
model_params = {
"conv1": [5,5, 64],
"conv2": [3,3,128],
"inception_1":{
"1x1":64,
"3x3":{ "1x1":96,
"3x3":128
},
"5x5":{ "1x1":16,
"5x5":32
},
"s1x1":32
},
"inception_2":{
"1x1":128,
"3x3":{ "1x1":128,
"3x3":192
},
"5x5":{ "1x1":32,
"5x5":96
},
"s1x1":64
},
"fc3": 10,
}
with tf.name_scope("googleLeNet_v1"):
net = nf.convolution_layer(self.inputs, model_params["conv1"], [1,2,2,1],name="conv1")
net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1], padding='SAME')
net = tf.nn.local_response_normalization(net, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75, name='LocalResponseNormalization')
net = nf.convolution_layer(net, model_params["conv2"], [1,1,1,1],name="conv2", flatten=False)
net = tf.nn.local_response_normalization(net, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75, name='LocalResponseNormalization')
net = nf.inception_v1(net, model_params, name= "inception_1", flatten=False)
net = nf.inception_v1(net, model_params, name= "inception_2", flatten=False)
net = tf.nn.avg_pool (net, ksize=[1, 3, 3, 1],strides=[1, 1, 1, 1], padding='VALID')
net = tf.reshape(net, [-1, int(np.prod(net.get_shape()[1:]))])
net = tf.layers.dropout(net, rate=self.dropout, training=self.is_training, name='dropout2')
logits = nf.fc_layer(net, model_params["fc3"], name="logits", activat_fn=None)
return logits
def resNet_v1(self):
model_params = {
"conv1": [5,5, 64],
"rb1_1": [3,3,64],
"rb1_2": [3,3,64],
"rb2_1": [3,3,128],
"rb2_2": [3,3,128],
"fc3": 10,
}
with tf.name_scope("resNet_v1"):
net = nf.convolution_layer(self.inputs, model_params["conv1"], [1,2,2,1],name="conv1")
id_rb1 = tf.nn.max_pool(net, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1], padding='SAME')
net = nf.convolution_layer(id_rb1, model_params["rb1_1"], [1,1,1,1],name="rb1_1")
id_rb2 = nf.convolution_layer(net, model_params["rb1_2"], [1,1,1,1],name="rb1_2")
id_rb2 = nf.shortcut(id_rb2,id_rb1, name="rb1")
net = nf.convolution_layer(id_rb2, model_params["rb2_1"], [1,2,2,1],padding="SAME",name="rb2_1")
id_rb3 = nf.convolution_layer(net, model_params["rb2_2"], [1,1,1,1],name="rb2_2")
id_rb3 = nf.shortcut(id_rb3,id_rb2, name="rb2")
net = nf.global_avg_pooling(id_rb3, flatten=True)
net = tf.layers.dropout(net, rate=self.dropout, training=self.is_training, name='dropout2')
logits = nf.fc_layer(net, model_params["fc3"], name="logits", activat_fn=None)
return logits
def build_model(self):
model_list = ["googleLeNet_v1", "resNet_v1"]
if self.model_ticket not in model_list:
print("sorry, wrong ticket!")
return 0
else:
fn = getattr(self, self.model_ticket)
            network = fn()
            return network
def unit_test():
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
is_training = tf.placeholder(tf.bool, name='is_training')
dropout = tf.placeholder(tf.float32, name='dropout')
mz = model_zoo(x, dropout, is_training,"resNet_v1")
return mz.build_model()
#m = unit_test()
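# Illustrative sketch (not part of the original file): one way to run a forward
# pass through the graph that unit_test() builds, using the TF1-style session
# API this module already relies on. The random feed values are placeholders
# chosen purely for demonstration.
def _example_forward_pass(batch_size=4):
    logits = unit_test()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {
            'x:0': np.random.rand(batch_size, 32, 32, 3).astype(np.float32),
            'dropout:0': 0.0,
            'is_training:0': False,
        }
        return sess.run(logits, feed_dict=feed)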
|
mit
| 1,189,355,182,992,824,300
| 34.859375
| 143
| 0.460885
| false
| 3.384218
| false
| false
| false
|
collectiveacuity/labPack
|
tests/test_events_meetup.py
|
1
|
10267
|
__author__ = 'rcj1492'
__created__ = '2016.12'
__license__ = 'MIT'
from labpack.events.meetup import *
if __name__ == '__main__':
# import dependencies & configs
from pprint import pprint
from time import time
from labpack.records.settings import load_settings
from labpack.handlers.requests import handle_requests
meetup_config = load_settings('../../cred/meetup.yaml')
# test oauth construction
from labpack.authentication.oauth2 import oauth2Client
oauth_kwargs = {
'client_id': meetup_config['oauth_client_id'],
'client_secret': meetup_config['oauth_client_secret'],
'redirect_uri': meetup_config['oauth_redirect_uri'],
'auth_endpoint': meetup_config['oauth_auth_endpoint'],
'token_endpoint': meetup_config['oauth_token_endpoint'],
'request_mimetype': meetup_config['oauth_request_mimetype'],
'requests_handler': handle_requests
}
meetup_oauth = oauth2Client(**oauth_kwargs)
# test generate url
url_kwargs = {
'service_scope': meetup_config['oauth_service_scope'].split(),
'state_value': 'unittest_%s' % str(time())
}
auth_url = meetup_oauth.generate_url(**url_kwargs)
assert auth_url.find('oauth2') > 0
# retrieve access token
from labpack.storage.appdata import appdataClient
log_client = appdataClient(collection_name='Logs', prod_name='Fitzroy')
path_filters = [{
0: {'discrete_values': ['knowledge']},
1: {'discrete_values': ['tokens']},
2: {'discrete_values':['meetup']}
}]
import yaml
token_list = log_client.list(log_client.conditional_filter(path_filters), reverse_search=True)
token_data = log_client.load(token_list[0])
token_details = yaml.load(token_data.decode())
# test access token renewal
# new_details = meetup_oauth.renew_token(token_details['refresh_token'])
# token_details.update(**new_details['json'])
# new_key = 'knowledge/tokens/meetup/%s/%s.yaml' % (token_details['user_id'], token_details['expires_at'])
# log_client.create(new_key, token_details)
# test client construction
meetup_client = meetupClient(token_details['access_token'], token_details['service_scope'])
# test member profile, settings, topics, groups and events
profile_details = meetup_client.get_member_brief()
member_id = int(profile_details['json']['id'])
assert isinstance(profile_details['json']['id'], str)
profile_details = meetup_client.get_member_profile(member_id)
assert isinstance(profile_details['json']['id'], int)
member_topics = meetup_client.list_member_topics(member_id)
assert isinstance(member_topics['json'][0]['id'], int)
member_groups = meetup_client.list_member_groups(member_id)
assert member_groups['json'][5]['group']['name']
if len(member_groups['json']) <= 200:
assert len(member_groups['json']) == profile_details['json']['stats']['groups']
member_events = meetup_client.list_member_events()
assert isinstance(member_events['json'], list)
# test member calendar, event attendees & other member profile, settings, topics & groups
# event_details = meetup_client.get_member_calendar(max_results=10)
# group_url = event_details['json'][0]['group']['urlname']
# event_id = int(event_details['json'][0]['id'])
# event_attendees = meetup_client.list_event_attendees(group_url, event_id)
# member_id = event_attendees['json'][0]['member']['id']
# profile_details = meetup_client.get_member_brief(member_id)
# assert profile_details['json']['joined']
# profile_details = meetup_client.get_member_profile(member_id)
# assert 'bio' in profile_details['json']['privacy'].keys()
# member_topics = meetup_client.list_member_topics(member_id)
# assert isinstance(member_topics['json'], list)
# member_groups = meetup_client.list_member_groups(member_id)
# assert isinstance(member_groups['json'], list)
# test event, venue and group details and list group events
# event_details = meetup_client.get_member_calendar(max_results=10)
# group_url = event_details['json'][0]['group']['urlname']
# event_id = int(event_details['json'][0]['id'])
# venue_id = event_details['json'][0]['venue']['id']
# group_id = int(event_details['json'][0]['group']['id'])
# group_details = meetup_client.get_group_details(group_id=group_id)
# assert group_details['json']['next_event']['id']
# print(group_details['json']['join_info'])
# group_events = meetup_client.list_group_events(group_url)
# assert group_events['json'][0]['created']
# event_details = meetup_client.get_event_details(group_url, event_id)
# assert event_details['json']['event_hosts'][0]['id']
# venue_details = meetup_client.get_venue_details(venue_id)
# assert venue_details['json']['name']
# test list groups, group members and locations
# list_kwargs = {
# 'categories': [34],
# 'latitude': 40.75,
# 'longitude': -73.98,
# 'radius': 1.0,
# 'max_results': 5
# }
# group_list = meetup_client.list_groups(**list_kwargs)
# assert group_list['json'][0]['organizer']['id']
# group_url = group_list['json'][0]['urlname']
# group_members = meetup_client.list_group_members(group_url, max_results=5)
# assert group_members['json'][0]['group_profile']['created']
# list_kwargs = {
# 'zip_code': '94203',
# 'max_results': 1
# }
# meetup_locations = meetup_client.list_locations(**list_kwargs)
# assert meetup_locations['json'][0]['city'] == 'Sacramento'
# test join and leave group
# member_profile = meetup_client.get_member_brief()
# member_id = int(member_profile['json']['id'])
# list_kwargs = {
# 'categories': [34],
# 'latitude': 40.75,
# 'longitude': -73.98,
# 'radius': 2.0,
# 'max_results': 10,
# 'member_groups': False
# }
# group_list = meetup_client.list_groups(**list_kwargs)
# group_url = ''
# question_id = 0
# for group in group_list['json']:
# if not group['join_info']['questions_req'] and group['join_info']['questions']:
# for question in group['join_info']['questions']:
# question_tokens = question['question'].split()
# for token in question_tokens:
# if token.lower() == 'name':
# group_url = group['urlname']
# question_id = question['id']
# break
# if group_url:
# break
# if group_url:
# break
# if group_url and question_id:
# membership_answers = [ { 'question_id': question_id, 'answer_text': 'First Last'}]
# response = meetup_client.join_group(group_url, membership_answers)
# print(response['json'])
# from time import sleep
# sleep(2)
# group_url = 'gdgnyc'
# response = meetup_client.leave_group(group_url, member_id)
# assert response['code'] == 204
# test join and leave topics
# member_profile = meetup_client.get_member_brief()
# member_id = int(member_profile['json']['id'])
# topic_list = [ 511, 611, 766 ]
# member_topics = meetup_client.list_member_topics(member_id)
# topic_set = [x['id'] for x in member_topics['json']]
# assert len(set(topic_list) - set(topic_set)) == len(topic_list)
# updated_profile = meetup_client.join_topics(member_id, topic_list)
# added_topics = []
# for topic in updated_profile['json']:
# if topic['id'] in topic_list:
# added_topics.append(topic['name'])
# assert len(added_topics) == len(topic_list)
# from time import sleep
# sleep(1)
# updated_profile = meetup_client.leave_topics(member_id, topic_list)
# assert len(updated_profile['json']) == len(member_topics['json'])
# test update profile
# member_brief = meetup_client.get_member_brief()
# member_id = int(member_brief['json']['id'])
# member_profile = meetup_client.get_member_profile(member_id)
# member_profile['json']['privacy']['groups'] = 'visible'
# member_profile['json']['birthday']['year'] = 1991
# updated_profile = meetup_client.update_member_profile(member_brief['json'], member_profile['json'])
# assert updated_profile['json']['privacy']['groups'] == 'visible'
# member_profile['json']['privacy']['groups'] = 'hidden'
# member_profile['json']['birthday']['year'] = 0
# updated_profile = meetup_client.update_member_profile(member_brief['json'], member_profile['json'])
# assert updated_profile['json']['privacy']['groups'] == 'hidden'
# test join and leave event
# event_details = meetup_client.get_member_calendar(max_results=100)
# event_id = 0
# group_url = ''
# survey_questions = []
# for event in event_details['json']:
# if event['fee']['required']:
# pass
# elif event['rsvp_limit'] >= event['yes_rsvp_count'] + 1:
# pass
# elif not event['rsvp_rules']['guest_limit']:
# pass
# elif not event['rsvpable']:
# pass
# elif not event['survey_questions']:
# pass
# elif event['self']['rsvp']['response'] == 'yes':
# pass
# else:
# group_url = event['group']['urlname']
# event_id = int(event['id'])
# survey_questions = event['survey_questions']
# break
# if event_id:
# join_kwargs = {
# 'attendance_answers': [{'question_id': survey_questions[0]['id'], 'answer_text': 'maybe'}],
# 'group_url': group_url,
# 'event_id': event_id,
# 'additional_guests': 1
# }
# attendee_details = meetup_client.join_event(**join_kwargs)
# assert attendee_details['json']['guests'] == 1
# attendee_details = meetup_client.leave_event(group_url, event_id)
# assert attendee_details['json']['response'] == 'no'
|
mit
| 1,395,610,115,595,362,000
| 43.438053
| 110
| 0.598812
| false
| 3.278097
| true
| false
| false
|
MazamaScience/ispaq
|
ispaq/pressureCorrelation_metrics.py
|
1
|
6526
|
"""
ISPAQ Business Logic for Pressure Correlation Metrics.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import math
import numpy as np
import pandas as pd
from obspy import UTCDateTime
from . import utils
from . import irisseismic
from . import irismustangmetrics
def pressureCorrelation_metrics(concierge):
"""
Generate *pressureCorrelation* metrics.
:type concierge: :class:`~ispaq.concierge.Concierge`
    :param concierge: Data access expediter.
:rtype: pandas dataframe (TODO: change this)
:return: Dataframe of pressureCorrelation metrics. (TODO: change this)
.. rubric:: Example
TODO: doctest examples
"""
# Get the logger from the concierge
logger = concierge.logger
# Container for all of the metrics dataframes generated
dataframes = []
# Default parameters from IRISMustangUtils::generateMetrics_crossTalk
    includeRestricted = False
channelFilter = "LH."
pressureLocation = "*"
pressureChannel = "LDO"
# ----- All available SNCLs -------------------------------------------------
try:
pressureAvailability = concierge.get_availability(location=pressureLocation, channel=pressureChannel)
except Exception as e:
logger.error('Metric calculation failed because concierge.get_availability failed: %s' % (e))
return None
if pressureAvailability is None or pressureAvailability.shape[0] == 0:
logger.info('No pressure channels available')
return None
else:
logger.info('%d pressure channels available' % (pressureAvailability.shape[0]))
# Loop over rows of the availability dataframe
for (pIndex, pAv) in pressureAvailability.iterrows():
logger.info(' %03d Pressure channel %s' % (pIndex, pAv.snclId))
# Get the data ----------------------------------------------
try:
r_pStream = concierge.get_dataselect(pAv.network, pAv.station, pAv.location, pAv.channel, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.debug('No data for %s' % (pAv.snclId))
else:
logger.debug('No data for %s from %s: %s' % (pAv.snclId, concierge.dataselect_url, e))
continue
# Merge traces -- gracefully go to next in loop if an error reported
try:
r_pStream = irisseismic.mergeTraces(r_pStream)
except Exception as e:
logger.debug("%s" % (e))
continue
# Get all desired seismic channels for this network-station
seismicAvailability = concierge.get_availability(pAv.network, pAv.station)
# Apply the channelFilter
seismicAvailability = seismicAvailability[seismicAvailability.channel.str.contains(channelFilter)]
if seismicAvailability is None or seismicAvailability.shape[0] == 0:
            logger.debug('No seismic %s channels available' % (channelFilter))
continue
# Find the locations associated with seismic channels
locations = list(seismicAvailability.location.unique())
# NOTE: At each unique location we should have a triplet of seismic channels that can
# NOTE: be correlated with the pressure channel
############################################################
# Loop through all locations with seismic data that can be
# correlated to this pressure channel.
############################################################
for loc in locations:
logger.debug('Working on location %s' % (loc))
locationAvailability = seismicAvailability[seismicAvailability.location == loc]
if locationAvailability is None or locationAvailability.shape[0] == 0:
                logger.debug('No location %s channels available' % (loc))
continue
############################################################
# Loop through all seismic channels at this SN.L
############################################################
# Loop over rows of the availability dataframe
for (index, lAv) in locationAvailability.iterrows():
try:
r_stream = concierge.get_dataselect(lAv.network, lAv.station, lAv.location, lAv.channel, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.debug('No data for %s' % (lAv.snclId))
else:
logger.debug('No data for %s from %s: %s' % (lAv.snclId, concierge.dataselect_url, e))
continue
# Merge traces -- gracefully go to next in loop if an error reported
try:
r_stream = irisseismic.mergeTraces(r_stream)
except Exception as e:
logger.debug("%s" % (e))
continue
logger.debug('Calculating pressureCorrelation metrics for %s:%s' % (pAv.snclId, lAv.snclId))
try:
df = irismustangmetrics.apply_correlation_metric(r_pStream, r_stream, 'correlation')
dataframes.append(df)
except Exception as e:
logger.debug('"pressure_effects" metric calculation failed for %s:%s: %s' % (pAv.snclId, lAv.snclId, e))
# End of locationAvailability loop
# End of locations loop
# End of pressureAvailability loop
# Concatenate and filter dataframes before returning -----------------------
if len(dataframes) == 0:
logger.warn('"pressure_correlation" metric calculation generated zero metrics')
return None
else:
result = pd.concat(dataframes, ignore_index=True)
# Change metricName to "pressure_effects"
result['metricName'] = 'pressure_effects'
result.reset_index(drop=True, inplace=True)
return(result)
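# Illustrative usage sketch (not in the original file): this function is driven
# by an ISPAQ Concierge object configured elsewhere; `concierge` below stands in
# for such an instance.
#
#     df = pressureCorrelation_metrics(concierge)
#     if df is not None:
#         print(df['metricName'].unique())   # every row is tagged 'pressure_effects'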
# ------------------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
|
gpl-3.0
| 7,286,951,401,950,999,000
| 37.163743
| 138
| 0.564818
| false
| 4.519391
| false
| false
| false
|
dodger487/MIST
|
data/magnetak_ml.py
|
1
|
23641
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Chris Riederer
# Google, Inc
# 2014-08-26
"""Contains everything related to machine learning in magnetak"""
import magnetak_detectors
import magnetak_util
import numpy as np
import scipy
import scipy.spatial
import scipy.spatial.distance
import sklearn
import sklearn.cross_validation
import sklearn.svm
import sklearn.linear_model
class MLDetector(magnetak_detectors.Detector):
"""A simple detector which detects a button press if magnet vector magnitude is
above a certain threshold"""
def __init__(self):
self.clf = None
    # fcn that takes magnetometer data and converts it to a feature vector
self.MagnetToVectorObj = None
self.lookBehindTime = 400 #ms
self.waitTime = 350 # ms
def detect(self, runData):
lookBehindTime = self.lookBehindTime * 1e6 # convert to nanoseconds
waitTime = self.waitTime *1e6
detections = []
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0] # times
lastFiring = 0 # keep track of last time button was pulled
for sensorTime in domain[domain > domain[0]+lookBehindTime]:
# wait for a full window before looking again
if sensorTime - lastFiring < waitTime:
continue
window = data[(domain > sensorTime - lookBehindTime) & (domain <= sensorTime)]
# wait to fire if we don't have any sensor events
if len(window) == 0:
continue
X = window[:,2]
Y = window[:,3]
Z = window[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
# some basic thresholds, put in sequence for easy commenting-out!
if abs(magnitudes[0] - magnitudes[-1]) > 500:
continue
# if min(magnitudes) > 1400:
# continue
if max(magnitudes) - min(magnitudes) < 30:
continue
featureVector = self.MagnetToVectorObj.featurize(window)
if self.clf.predict(featureVector)[0]:
detections.append(sensorTime)
lastFiring = sensorTime
return detections
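# Illustrative sketch (not part of the original module): one plausible way to
# assemble an MLDetector. The training-data layout (lists of magnetometer
# windows plus 0/1 labels) and the choice of classifier are assumptions, not
# something this file prescribes; a window is an ndarray of rows
# [time, accuracy, x, y, z], matching what detect() slices out of
# runData["magnetometer"].
def _example_build_detector(positive_windows, negative_windows):
  featurizer = MagnitudeFeaturesDataToVec()
  X = [featurizer.featurize(w) for w in positive_windows + negative_windows]
  y = [1] * len(positive_windows) + [0] * len(negative_windows)
  clf = sklearn.linear_model.LogisticRegression()
  clf.fit(X, y)
  detector = MLDetector()
  detector.clf = clf
  detector.MagnetToVectorObj = featurizer
  return detector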
class MagnetometerToFeatureVector(object):
def featurize(self, magnetometer):
"""This method should take in magnetometer data and output a feature vector"""
raise NotImplementedError("Please implement this method")
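# Illustrative sketch (not part of the original module): the minimal shape a
# featurizer subclass takes. It emits a single feature, the peak field magnitude
# in the window, purely to show the expected input/output contract
# (rows of [time, accuracy, x, y, z] in, a flat feature list out).
class _ExamplePeakMagnitudeMagToVec(MagnetometerToFeatureVector):
  def featurize(self, magData):
    magData = np.array(magData)
    X, Y, Z = magData[:, 2], magData[:, 3], magData[:, 4]
    return [float(np.max(np.sqrt(X**2 + Y**2 + Z**2)))]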
class MagnitudeTemplateSumOfDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
self.window_size = 400000000
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t)) for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, magnitudes, self.templates[3])]
class AllAxesTemplateSumOfDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
self.window_size = 400 * 1e6
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t)) for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, X, self.templates[0]),
self.SumOfDifferences(domain, Y, self.templates[1]),
self.SumOfDifferences(domain, Z, self.templates[2]),
self.SumOfDifferences(domain, magnitudes, self.templates[3]),
]
class ManyFeaturesSumOfDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
# self.window_size = 500 * 1e6
self.window_size = 450 * 1e6
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
# axis = axis - axis[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, X, self.templates[0]),
self.SumOfDifferences(domain, Y, self.templates[1]),
self.SumOfDifferences(domain, Z, self.templates[2]),
self.SumOfDifferences(domain, magnitudes, self.templates[3]),
magnitudes[0] - magnitudes[-1],
max(magnitudes),
min(magnitudes),
max(magnitudes) - min(magnitudes),
]
class RawMagnitudeManyFeaturesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
# self.window_size = 500 * 1e6
self.window_size = 450 * 1e6
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
# axis = axis - axis[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, X, self.templates[0]),
self.SumOfDifferences(domain, Y, self.templates[1]),
self.SumOfDifferences(domain, Z, self.templates[2]),
self.SumOfDifferences(domain, magnitudes, self.templates[3]),
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class NegAndPosTemplatesMagToVec(MagnetometerToFeatureVector):
def __init__(self, posTemplates, negTemplates):
self.posTemplates = posTemplates
self.negTemplates = negTemplates
self.window_size = 450 * 1e6
myFunc = lambda x : float(x) / self.window_size
self.negTemplates = [myFunc] * 4
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
# axis = axis - axis[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def CosineSimilarity(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
otherVect = [template(t) for t in domain if t < self.window_size]
distance = scipy.spatial.distance.cosine(axis, otherVect)
# features = [f if not np.isnan(f) else 0 for f in features]
# return features
return distance if not np.isnan(distance) else 0
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.CosineSimilarity(domain, X, self.posTemplates[0]),
self.CosineSimilarity(domain, Y, self.posTemplates[1]),
self.CosineSimilarity(domain, Z, self.posTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.posTemplates[3]),
self.CosineSimilarity(domain, X, self.negTemplates[0]),
self.CosineSimilarity(domain, Y, self.negTemplates[1]),
self.CosineSimilarity(domain, Z, self.negTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.negTemplates[3]),
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class KitchenSync(MagnetometerToFeatureVector):
def __init__(self, posTemplates):
self.posTemplates = posTemplates
self.window_size = 400 * 1e6
myFunc = lambda x : float(x) / self.window_size
self.negTemplates = [myFunc] * 4
def CosineSimilarity(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
otherVect = [template(t) for t in domain if t < self.window_size]
distance = scipy.spatial.distance.cosine(axis, otherVect)
return distance if not np.isnan(distance) else 0
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.CosineSimilarity(domain, X, self.posTemplates[0]),
self.CosineSimilarity(domain, Y, self.posTemplates[1]),
self.CosineSimilarity(domain, Z, self.posTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.posTemplates[3]),
self.CosineSimilarity(domain, X, self.negTemplates[0]),
self.CosineSimilarity(domain, Y, self.negTemplates[1]),
self.CosineSimilarity(domain, Z, self.negTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.negTemplates[3]),
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class MagnitudeFeaturesDataToVec(MagnetometerToFeatureVector):
def __init__(self):
self.window_size = 450 * 1e6
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class TestTemplateDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, posTemplates):
self.posTemplates = posTemplates
self.window_size = 450 * 1e6
myFunc = lambda x : float(x) / self.window_size
self.negTemplates = [myFunc] * 4
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t)) for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def SquareSumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def CosineSimilarity(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
otherVect = [template(t) for t in domain if t < self.window_size]
distance = scipy.spatial.distance.cosine(axis, otherVect)
return distance
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
features = [
self.SumOfDifferences(domain, X, self.posTemplates[0]),
self.SumOfDifferences(domain, Y, self.posTemplates[1]),
self.SumOfDifferences(domain, Z, self.posTemplates[2]),
self.SumOfDifferences(domain, magnitudes, self.posTemplates[3]),
self.SumOfDifferences(domain, X, self.negTemplates[0]),
self.SumOfDifferences(domain, Y, self.negTemplates[1]),
self.SumOfDifferences(domain, Z, self.negTemplates[2]),
self.SumOfDifferences(domain, magnitudes, self.negTemplates[3]),
self.SquareSumOfDifferences(domain, X, self.posTemplates[0]),
self.SquareSumOfDifferences(domain, Y, self.posTemplates[1]),
self.SquareSumOfDifferences(domain, Z, self.posTemplates[2]),
self.SquareSumOfDifferences(domain, magnitudes, self.posTemplates[3]),
self.SquareSumOfDifferences(domain, X, self.negTemplates[0]),
self.SquareSumOfDifferences(domain, Y, self.negTemplates[1]),
self.SquareSumOfDifferences(domain, Z, self.negTemplates[2]),
self.SquareSumOfDifferences(domain, magnitudes, self.negTemplates[3]),
self.CosineSimilarity(domain, X, self.posTemplates[0]),
self.CosineSimilarity(domain, Y, self.posTemplates[1]),
self.CosineSimilarity(domain, Z, self.posTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.posTemplates[3]),
self.CosineSimilarity(domain, X, self.negTemplates[0]),
self.CosineSimilarity(domain, Y, self.negTemplates[1]),
self.CosineSimilarity(domain, Z, self.negTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.negTemplates[3]),
]
features = [f if not np.isnan(f) else 0 for f in features]
return features
class CloseToOriginal(MagnetometerToFeatureVector):
def __init__(self, T1=30, T2=130):
self.T1 = 30
self.T2 = 130
self.segment_time = 200 # ms
def featurize(self, data):
"""This method should take in magnetometer data and output a feature vector"""
segment_time_ns = self.segment_time * 1e6 # convert to nanoseconds
window_size = segment_time_ns * 2
data = np.array(data)
domain = data[:,0] # first index is time, second is accuracy
# magData = magData[ domain < domain[0] + self.window_size ]
segment1 = data[(domain <= domain[0] + segment_time_ns)]
segment2 = data[(domain > domain[0] + segment_time_ns) & (domain <= domain[0] + window_size)]
# window = data[(domain > sensorTime - window_size) & (domain <= sensorTime)]
if len(segment1) == 0 or len(segment2) == 0:
return [0,0]
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
return [min(norms1), max(norms2)]
class ThreePartFeaturizer(MagnetometerToFeatureVector):
def __init__(self, T1=30, T2=130):
self.segment1_time = 100
self.segment2_time = 200 # ms
self.segment3_time = 100 # ms
def featurize(self, data):
"""This method should take in magnetometer data and output a feature vector"""
segment_time1_ns = self.segment1_time * 1e6 # convert to nanoseconds
segment_time2_ns = self.segment2_time * 1e6 # convert to nanoseconds
segment_time3_ns = self.segment3_time * 1e6 # convert to nanoseconds
data = np.array(data)
domain = data[:,0] # first index is time, second is accuracy
segment1 = data[(domain <= domain[0] + segment_time1_ns)]
segment2 = data[(domain > domain[0] + segment_time1_ns) &
(domain <= domain[0] + segment_time1_ns + segment_time2_ns)]
segment3 = data[(domain > domain[0] + segment_time1_ns + segment_time2_ns) &
(domain <= domain[0] + segment_time1_ns + segment_time2_ns + segment_time3_ns)]
if len(segment1) == 0 or len(segment2) == 0 or len(segment3) == 0:
return [0,0,0]
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
offsets3 = segment3[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
norms3 = [np.linalg.norm(row) for row in offsets3]
return [max(norms1), max(norms2), max(norms3)]
class WindowFeaturizer(MagnetometerToFeatureVector):
def __init__(self, T1=30, T2=130):
self.segment_time = 200 # ms
def featurize(self, data):
"""This method should take in magnetometer data and output a feature vector"""
segment_time_ns = self.segment_time * 1e6 # convert to nanoseconds
window_size = segment_time_ns * 2
data = np.array(data)
domain = data[:,0] # first index is time, second is accuracy
# magData = magData[ domain < domain[0] + self.window_size ]
segment1 = data[(domain <= domain[0] + segment_time_ns)]
segment2 = data[(domain > domain[0] + segment_time_ns) & (domain <= domain[0] + window_size)]
if len(segment1) == 0 or len(segment2) == 0:
return np.array([0,0,0,0,0,0,0,0,0,0,0,0])
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
window = data[(domain <= domain[0] + window_size)]
window = window - window[0,:]
norms_scaled = [np.linalg.norm(row[2:5]) for row in window]
# X = window[:,2]
# Y = window[:,3]
# Z = window[:,4]
# magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
scaled_magnitudes = np.array(magnetak_util.scale(norms_scaled))
scaled_segment1 = np.array(scaled_magnitudes[(window[:,0] < segment_time_ns)])
scaled_segment2 = np.array(scaled_magnitudes[(window[:,0] > segment_time_ns) & (window[:,0] <= window_size)])
# print len(norms1), len(norms2)
# print len(scaled_segment1), len(scaled_segment2)
return np.array([
min(norms1),
max(norms1),
np.mean(norms1),
min(norms2),
max(norms2),
np.mean(norms2),
min(scaled_segment1),
max(scaled_segment1),
np.mean(scaled_segment1),
min(scaled_segment2),
max(scaled_segment2),
np.mean(scaled_segment2),
])
def GenerateData(runDataList, DataToVectorObj):
"""Given a list of runData objects, and a Featurizer object, returns a list
of feature vectors and labels"""
X, Y = [], []
for runData in runDataList:
# print runData['filename']
features = DataToVectorObj.featurize(runData['magnetometer'])
# if float('NaN') in features or float('inf') in features or float('-inf') in features:
# print runData['filename']
# if len(filter(np.isnan, features)) > 1:
# print runData['filename']
X.append(features)
if len(runData['labels']) > 0:
label = runData['labels'][0][1] # label of first labeled item
Y.append(label)
else:
Y.append(0)
return np.array(X), np.array(Y)
def TrainDetectorOnData(runDataList, featurizer):
"""Given a list of runData objects and a Featurizer, creates training data
and trains an algorithm. Returns a trained MLDetector object.
"""
# TODO(cjr): make options for using other algorithms
# train, test = sklearn.cross_validation.train_test_split(runDataList)
positives = [rd for rd in runDataList if len(rd['labels']) > 0]
posTemplates = magnetak_util.CreateTemplates(positives)
negatives = [rd for rd in runDataList if len(rd['labels']) == 0]
negTemplates = magnetak_util.CreateTemplates(negatives)
trainX, trainY = GenerateData(runDataList, featurizer)
# clf = sklearn.svm.LinearSVC()
# clf = sklearn.svm.SVC(kernel='linear')
clf = sklearn.linear_model.LogisticRegression()
clf.fit(trainX, trainY)
print clf.coef_
detector = MLDetector()
detector.clf = clf
detector.MagnetToVectorObj = featurizer
return detector
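# Illustrative sketch (not in the original file): rough end-to-end usage of the
# helpers above. It assumes runDataList items are dicts with 'magnetometer' and
# 'labels' keys, which is the structure GenerateData() and TrainDetectorOnData()
# already rely on.
def _example_training_pipeline(runDataList):
    featurizer = MagnitudeFeaturesDataToVec()
    trainX, trainY = GenerateData(runDataList, featurizer)
    detector = TrainDetectorOnData(runDataList, featurizer)
    return trainX.shape, trainY.shape, detector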
|
apache-2.0
| 1,774,346,763,933,319,000
| 37.378247
| 113
| 0.646419
| false
| 3.323166
| false
| false
| false
|
alkaitz/general-programming
|
water_level/water_level.py
|
1
|
1035
|
'''
Created on Aug 1, 2017
@author: alkaitz
'''
'''
An integer array defines the heights of a 2D set of columns. After enough rain has fallen,
how much water will be trapped in the valleys formed by these columns?
    Ex: [3 2 3]
        X   X      X W X
        X X X  ->  X X X  ->  1
        X X X      X X X
'''
def water_level(a):
if not a:
        raise ValueError("Array cannot be empty")
water = 0
leftIndex, rightIndex = 0, len(a) - 1
left, right = a[0], a[-1]
while leftIndex <= rightIndex:
if left <= right:
water += max(left - a[leftIndex], 0)
left = max(left, a[leftIndex])
leftIndex += 1
else:
water += max(right - a[rightIndex], 0)
right = max(right, a[rightIndex])
rightIndex -= 1
return water
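# Illustrative reference implementation (not part of the original): an O(n^2)
# brute force that, for each column, adds min(highest column to its left,
# highest column to its right) minus its own height. Handy for sanity-checking
# the two-pointer version above on small inputs.
def water_level_bruteforce(a):
    total = 0
    for i in range(len(a)):
        left = max(a[:i + 1])    # highest column up to and including i
        right = max(a[i:])       # highest column from i to the end
        total += max(min(left, right) - a[i], 0)
    return total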
if __name__ == '__main__':
assert(water_level([3, 2, 3]) == 1)
assert(water_level([1, 2, 3, 4]) == 0)
assert(water_level([5, 1, 3, 4]) == 4)
assert(water_level([2, 1, 4, 3, 6]) == 2)
print "Successful"
|
mit
| 4,423,365,319,701,444,600
| 26.263158
| 102
| 0.533333
| false
| 3.194444
| false
| false
| false
|
jonfoster/pyxb1
|
examples/ndfd/showreq.py
|
1
|
1914
|
import pyxb.utils.domutils
import xml.dom
import xml.dom.minidom
import pyxb.namespace
# Structure
#import DWML
#print 'Validating DWML'
#DWML.Namespace.validateSchema()
#print 'Validated DWML: types %s' % ("\n".join(DWML.Namespace.typeDefinitions().keys()),)
xmls = open('NDFDgen.xml').read()
dom = xml.dom.minidom.parseString(xmls)
body_dom = dom.documentElement.firstChild.nextSibling.firstChild.nextSibling
print body_dom
# Service interface types
import ndfd
# WSDL
import pyxb.bundles.wssplat.wsdl11 as wsdl
uri_src = open('ndfdXML.wsdl')
doc = xml.dom.minidom.parseString(uri_src.read())
spec = wsdl.definitions.createFromDOM(doc.documentElement, process_schema=True)
binding = spec.binding[0]
print binding.name
port_type = spec.portType[0]
print port_type.name
bop = binding.operationMap()[body_dom.localName]
print bop.toxml("utf-8")
pop = port_type.operationMap()[body_dom.localName]
print pop.toxml("utf-8")
input = pop.input
print input.toxml("utf-8")
print type(input)
print input.message
im_en = input._namespaceContext().interpretQName(input.message)
print im_en
msg = im_en.message()
print msg
for p in msg.part:
print p.toxml("utf-8")
msg_ns = pyxb.namespace.NamespaceForURI(body_dom.namespaceURI)
print '%s %s' % (body_dom.namespaceURI, msg_ns)
parts = msg.part
nodes = body_dom.childNodes
while parts and nodes:
p = parts.pop(0)
while nodes and (not (xml.dom.Node.ELEMENT_NODE == nodes[0].nodeType)):
nodes.pop(0)
assert nodes
n = nodes.pop(0)
if p.name != n.localName:
print 'Desynchronized: part %s expected node %s' % (p.name, n.localName)
nodes.insert(0, n)
continue
print '%s %s' % (p.name, n.localName)
#print '%s yielded %s' msg_ns
#msg = spec.messageMap()
#print msg
#print req
#dom_support = req.toDOM(pyxb.utils.domutils.BindingDOMSupport())
#dom_support.finalize()
#print dom_support.document().toxml("utf-8")
|
apache-2.0
| -6,638,820,040,696,046,000
| 25.583333
| 89
| 0.718913
| false
| 2.958269
| false
| false
| false
|
huazhisong/graduate_text
|
src/rnn/utils.py
|
1
|
4176
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import time
import csv
import collections
import cPickle as pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.contrib import learn
class TextLoader(object):
def __init__(self, utils_dir, data_path, batch_size, seq_length, vocab, labels, encoding='utf8'):
self.data_path = data_path
self.batch_size = batch_size
self.seq_length = seq_length
self.encoding = encoding
if utils_dir is not None:
self.utils_dir = utils_dir
label_file = os.path.join(utils_dir, 'labels.pkl')
vocab_file = os.path.join(utils_dir, 'vocab.pkl')
corpus_file = os.path.join(utils_dir, 'corpus.txt')
with open(label_file, 'r') as f:
self.labels = pickle.load(f)
self.label_size = len(self.labels)
if not os.path.exists(vocab_file):
print 'reading corpus and processing data'
self.preprocess(vocab_file, corpus_file, data_path)
else:
print 'loading vocab and processing data'
self.load_preprocessed(vocab_file, data_path)
elif vocab is not None and labels is not None:
self.vocab = vocab
self.vocab_size = len(vocab) + 1
self.labels = labels
self.label_size = len(self.labels)
self.load_preprocessed(None, data_path)
self.reset_batch_pointer()
def transform(self, d):
new_d = map(self.vocab.get, d[:self.seq_length])
new_d = map(lambda i: i if i else 0, new_d)
if len(new_d) >= self.seq_length:
new_d = new_d[:self.seq_length]
else:
new_d = new_d + [0] * (self.seq_length - len(new_d))
return new_d
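    # Illustrative note (not in the original): with seq_length=5 and
    # vocab={'a': 1, 'b': 2}, transform('ab') yields [1, 2, 0, 0, 0]
    # (unknown characters and padding both map to index 0), while longer
    # strings are truncated to the first seq_length characters.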
def preprocess(self, vocab_file, corpus_file, data_path):
with open(corpus_file, 'r') as f:
corpus = f.readlines()
corpus = ''.join(map(lambda i: i.strip(), corpus))
try:
corpus = corpus.decode('utf8')
except Exception as e:
# print e
pass
counter = collections.Counter(corpus)
count_pairs = sorted(counter.items(), key=lambda i: -i[1])
self.chars, _ = zip(*count_pairs)
with open(vocab_file, 'wb') as f:
pickle.dump(self.chars, f)
self.vocab_size = len(self.chars) + 1
self.vocab = dict(zip(self.chars, range(1, len(self.chars)+1)))
data = pd.read_csv(data_path, encoding='utf8')
tensor_x = np.array(list(map(self.transform, data['text'])))
tensor_y = np.array(list(map(self.labels.get, data['label'])))
self.tensor = np.c_[tensor_x, tensor_y].astype(int)
def load_preprocessed(self, vocab_file, data_path):
if vocab_file is not None:
with open(vocab_file, 'rb') as f:
self.chars = pickle.load(f)
self.vocab_size = len(self.chars) + 1
self.vocab = dict(zip(self.chars, range(1, len(self.chars)+1)))
data = pd.read_csv(data_path, encoding='utf8')
tensor_x = np.array(list(map(self.transform, data['text'])))
tensor_y = np.array(list(map(self.labels.get, data['label'])))
self.tensor = np.c_[tensor_x, tensor_y].astype(int)
def create_batches(self):
self.num_batches = int(self.tensor.shape[0] / self.batch_size)
if self.num_batches == 0:
assert False, 'Not enough data, make batch_size small.'
np.random.shuffle(self.tensor)
tensor = self.tensor[:self.num_batches * self.batch_size]
self.x_batches = np.split(tensor[:, :-1], self.num_batches, 0)
self.y_batches = np.split(tensor[:, -1], self.num_batches, 0)
def next_batch(self):
x = self.x_batches[self.pointer]
y = self.y_batches[self.pointer]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
self.create_batches()
self.pointer = 0
|
agpl-3.0
| 323,030,258,419,681,400
| 31.95122
| 101
| 0.559148
| false
| 3.590714
| false
| false
| false
|
USGSDenverPychron/pychron
|
launchers/pydiode.py
|
1
|
1078
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from helpers import entry_point
entry_point('pydiode', 'PyDiode', '_diode')
# ============= EOF =============================================
|
apache-2.0
| -2,949,165,895,291,454,000
| 43.916667
| 81
| 0.519481
| false
| 5.084906
| false
| false
| false
|
pymedusa/SickRage
|
medusa/show/coming_episodes.py
|
1
|
7304
|
# coding=utf-8
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from builtins import object
from builtins import str
from datetime import date, timedelta
from operator import itemgetter
from medusa import app
from medusa.common import (
ARCHIVED,
DOWNLOADED,
IGNORED,
SNATCHED,
SNATCHED_BEST,
SNATCHED_PROPER,
UNAIRED,
WANTED
)
from medusa.db import DBConnection
from medusa.helper.common import dateFormat, timeFormat
from medusa.helpers.quality import get_quality_string
from medusa.network_timezones import parse_date_time
from medusa.sbdatetime import sbdatetime
from medusa.tv.series import SeriesIdentifier
class ComingEpisodes(object):
"""
Missed: yesterday...(less than 1 week)
Today: today
Soon: tomorrow till next week
Later: later than next week
"""
categories = ['later', 'missed', 'soon', 'today']
sorts = {
'date': itemgetter('localtime'),
'network': itemgetter('network', 'localtime'),
'show': itemgetter('show_name', 'localtime'),
}
def __init__(self):
pass
@staticmethod
def get_coming_episodes(categories, sort, group, paused=app.COMING_EPS_DISPLAY_PAUSED):
"""
:param categories: The categories of coming episodes. See ``ComingEpisodes.categories``
:param sort: The sort to apply to the coming episodes. See ``ComingEpisodes.sorts``
:param group: ``True`` to group the coming episodes by category, ``False`` otherwise
:param paused: ``True`` to include paused shows, ``False`` otherwise
:return: The list of coming episodes
"""
categories = ComingEpisodes._get_categories(categories)
sort = ComingEpisodes._get_sort(sort)
today = date.today().toordinal()
next_week = (date.today() + timedelta(days=7)).toordinal()
recently = (date.today() - timedelta(days=app.COMING_EPS_MISSED_RANGE)).toordinal()
status_list = [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER,
ARCHIVED, IGNORED]
db = DBConnection()
fields_to_select = ', '.join(
['airdate', 'airs', 'e.description as description', 'episode', 'imdb_id', 'e.indexer',
'indexer_id', 'name', 'network', 'paused', 's.quality', 'runtime', 'season', 'show_name',
'showid', 's.status']
)
results = db.select(
'SELECT %s ' % fields_to_select +
'FROM tv_episodes e, tv_shows s '
'WHERE season != 0 '
'AND airdate >= ? '
'AND airdate < ? '
'AND s.indexer = e.indexer '
'AND s.indexer_id = e.showid '
'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',
[today, next_week] + status_list
)
done_shows_list = [int(result['showid']) for result in results]
placeholder = ','.join(['?'] * len(done_shows_list))
placeholder2 = ','.join(['?'] * len([DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]))
# FIXME: This inner join is not multi indexer friendly.
results += db.select(
'SELECT %s ' % fields_to_select +
'FROM tv_episodes e, tv_shows s '
'WHERE season != 0 '
'AND showid NOT IN (' + placeholder + ') '
'AND s.indexer_id = e.showid '
'AND airdate = (SELECT airdate '
'FROM tv_episodes inner_e '
'WHERE inner_e.season != 0 '
'AND inner_e.showid = e.showid '
'AND inner_e.indexer = e.indexer '
'AND inner_e.airdate >= ? '
'ORDER BY inner_e.airdate ASC LIMIT 1) '
'AND e.status NOT IN (' + placeholder2 + ')',
done_shows_list + [next_week] + [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]
)
results += db.select(
'SELECT %s ' % fields_to_select +
'FROM tv_episodes e, tv_shows s '
'WHERE season != 0 '
'AND s.indexer_id = e.showid '
'AND airdate < ? '
'AND airdate >= ? '
'AND e.status IN (?,?) '
'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',
[today, recently, WANTED, UNAIRED] + status_list
)
for index, item in enumerate(results):
item['series_slug'] = str(SeriesIdentifier.from_id(int(item['indexer']), item['indexer_id']))
results[index]['localtime'] = sbdatetime.convert_to_setting(
parse_date_time(item['airdate'], item['airs'], item['network']))
results.sort(key=ComingEpisodes.sorts[sort])
if not group:
return results
grouped_results = ComingEpisodes._get_categories_map(categories)
for result in results:
if result['paused'] and not paused:
continue
result['airs'] = str(result['airs']).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ')
result['airdate'] = result['localtime'].toordinal()
if result['airdate'] < today:
category = 'missed'
elif result['airdate'] >= next_week:
category = 'later'
elif result['airdate'] == today:
category = 'today'
else:
category = 'soon'
if len(categories) > 0 and category not in categories:
continue
if not result['network']:
result['network'] = ''
result['quality'] = get_quality_string(result['quality'])
result['airs'] = sbdatetime.sbftime(result['localtime'], t_preset=timeFormat).lstrip('0').replace(' 0', ' ')
result['weekday'] = 1 + date.fromordinal(result['airdate']).weekday()
result['tvdbid'] = result['indexer_id']
result['airdate'] = sbdatetime.sbfdate(result['localtime'], d_preset=dateFormat)
result['localtime'] = result['localtime'].toordinal()
grouped_results[category].append(result)
return grouped_results
@staticmethod
def _get_categories(categories):
if not categories:
return []
if not isinstance(categories, list):
return categories.split('|')
return categories
@staticmethod
def _get_categories_map(categories):
if not categories:
return {}
return {category: [] for category in categories}
@staticmethod
def _get_sort(sort):
sort = sort.lower() if sort else ''
if sort not in ComingEpisodes.sorts:
return 'date'
return sort
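# Illustrative sketch (not part of Medusa): the category bucketing performed in
# get_coming_episodes() reduces to this comparison of ordinal dates.
def _example_categorize(airdate_ordinal, today_ordinal, next_week_ordinal):
    if airdate_ordinal < today_ordinal:
        return 'missed'
    elif airdate_ordinal >= next_week_ordinal:
        return 'later'
    elif airdate_ordinal == today_ordinal:
        return 'today'
    return 'soon'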
|
gpl-3.0
| -8,968,734,022,603,750,000
| 35.52
| 120
| 0.583242
| false
| 3.846235
| false
| false
| false
|
okffi/decisions
|
web/decisions/subscriptions/models.py
|
1
|
4698
|
from __future__ import unicode_literals
import os
import base64
from datetime import timedelta
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.postgres import fields as pgfields
def make_confirm_code():
return base64.b64encode(os.urandom(15))
class UserProfile(models.Model):
user = models.OneToOneField('auth.User', related_name="profile")
email_confirmed = models.DateTimeField(null=True, blank=True)
email_confirm_code = models.CharField(
max_length=20,
default=make_confirm_code
)
email_confirm_sent_on = models.DateTimeField(null=True, blank=True)
extra = pgfields.JSONField(default=dict)
def __unicode__(self):
return self.user.username
def confirmed_email(self):
if self.email_confirmed:
return self.user.email
class SubscriptionUser(models.Model):
user = models.ForeignKey('auth.User')
subscription = models.ForeignKey('Subscription')
active = models.BooleanField(default=True, verbose_name=_('Active'))
send_mail = models.BooleanField(
default=False,
verbose_name=_('Sends email')
)
subscribed_at = models.DateTimeField(default=now)
def __unicode__(self):
return u"%s: %s" % (self.user, self.subscription)
def is_fresh(self, for_user):
return self.subscription.subscriptionhit_set.filter(
created__gt=now()-timedelta(days=3),
notified_users=for_user
).count()
class Meta:
verbose_name = _("subscribed user")
verbose_name_plural = _("subscribed users")
class SubscriptionQuerySet(models.QuerySet):
def get_fresh(self):
return (
self
.filter(
subscriptionhit__created__gt=now()-timedelta(days=3)
)
.annotate(hit_count=models.Count('subscriptionhit'))
.filter(hit_count__gt=0)
)
class Subscription(models.Model):
subscribed_users = models.ManyToManyField(
'auth.User',
through=SubscriptionUser
)
previous_version = models.ForeignKey(
'self',
null=True,
blank=True,
related_name="next_versions"
)
HAYSTACK, GEO = range(2)
BACKEND_CHOICES = (
(HAYSTACK, _("Text Search")),
(GEO, _("Map Search")),
)
search_backend = models.IntegerField(
default=HAYSTACK,
choices=BACKEND_CHOICES,
verbose_name=_("Search type")
)
search_term = models.CharField(
max_length=300,
verbose_name=_('Search term')
)
created = models.DateTimeField(default=now)
extra = pgfields.JSONField(default=dict)
objects = SubscriptionQuerySet.as_manager()
def is_geo_search(self):
return self.search_backend == self.GEO
def is_text_search(self):
return self.search_backend == self.HAYSTACK
def __unicode__(self):
return self.search_term
class Meta:
verbose_name = _("subscription")
verbose_name_plural = _("subscriptions")
get_latest_by = 'created'
class SubscriptionHit(models.Model):
subscriptions = models.ManyToManyField(Subscription)
notified_users = models.ManyToManyField('auth.User')
created = models.DateTimeField(default=now)
subject = models.CharField(max_length=300)
link = models.CharField(max_length=300)
SEARCH_RESULT, COMMENT_REPLY = range(2)
HIT_TYPES = (
(SEARCH_RESULT, _("Search result")),
(COMMENT_REPLY, _("Comment reply")),
)
hit_type = models.IntegerField(default=SEARCH_RESULT, choices=HIT_TYPES)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
hit = GenericForeignKey('content_type', 'object_id')
extra = pgfields.JSONField(default=dict)
# utility functions to allow template checks
def is_comment_reply(self):
return self.hit_type == self.COMMENT_REPLY
def is_search_result(self):
return self.hit_type == self.SEARCH_RESULT
def format_subject(self):
"translated, formatted subject line"
if "subject_mapping" in self.extra:
return _(self.subject) % self.extra["subject_mapping"]
return self.subject
def __unicode__(self):
return self.subject
class Meta:
verbose_name = _("subscription hit")
verbose_name_plural = _("subscription hits")
get_latest_by = "created"
ordering = ('-created',)
|
bsd-3-clause
| -2,500,066,219,397,966,300
| 28.923567
| 76
| 0.649638
| false
| 4.124671
| false
| false
| false
|
rowinggolfer/openmolar2
|
src/lib_openmolar/client/qt4/dialogs/save_discard_cancel_dialog.py
|
1
|
3474
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <neil@openmolar.com> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
from PyQt4 import QtCore, QtGui
from lib_openmolar.common.qt4.dialogs import ExtendableDialog
class SaveDiscardCancelDialog(ExtendableDialog):
def __init__(self, message, changes, parent=None):
'''
offers a choiced of save discard cancel, but allows for examination
of what has changed.
changes should be a function, which returns a string list
'''
ExtendableDialog.__init__(self, parent)
self.set_advanced_but_text(_("What's changed?"))
self.apply_but.setText("&Save")
self.enableApply()
self.save_on_exit = True
label = QtGui.QLabel(message)
label.setAlignment(QtCore.Qt.AlignCenter)
self.insertWidget(label)
self.discard_but = self.button_box.addButton(
QtGui.QDialogButtonBox.Discard)
self.changes = changes
self.changes_list_widget = QtGui.QListWidget()
self.add_advanced_widget(self.changes_list_widget)
def sizeHint(self):
return QtCore.QSize(400,100)
def _clicked(self, but):
if but == self.discard_but:
self.discard()
return
ExtendableDialog._clicked(self, but)
def discard(self):
if QtGui.QMessageBox.question(self,_("Confirm"),
_("Are you sure you want to discard these changes?"),
QtGui.QMessageBox.No | QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No )==QtGui.QMessageBox.Yes:
self.save_on_exit = False
self.accept()
def showExtension(self, extend):
if extend:
self.changes_list_widget.clear()
self.changes_list_widget.addItems(self.changes())
ExtendableDialog.showExtension(self, extend)
if __name__ == "__main__":
from gettext import gettext as _
def changes():
return ["Sname","Fname"]
app = QtGui.QApplication([])
message = "You have unsaved changes"
dl = SaveDiscardCancelDialog(message, changes)
print dl.exec_()
|
gpl-3.0
| -4,159,314,006,897,088,500
| 39.882353
| 79
| 0.519862
| false
| 4.700947
| false
| false
| false
|
amdegroot/ssd.pytorch
|
layers/box_utils.py
|
1
|
9435
|
# -*- coding: utf-8 -*-
import torch
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
def center_size(boxes):
""" Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
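# Illustrative note (not in the original): point_form and center_size are
# inverses of each other. For a single prior with (cx, cy, w, h) =
# (0.5, 0.5, 0.2, 0.4), point_form gives (xmin, ymin, xmax, ymax) =
# (0.4, 0.3, 0.6, 0.7), and center_size maps that back to the original prior.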
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
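# Illustrative worked example (not in the original): for box_a = [0, 0, 2, 2]
# and box_b = [1, 1, 3, 3], the intersection is the unit square [1, 1, 2, 2]
# with area 1; each box has area 4, so the jaccard overlap is 1 / (4 + 4 - 1) = 1/7.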
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
"""Match each prior box with the ground truth box of the highest jaccard
overlap, encode the bounding boxes, then return the matched indices
corresponding to both confidence and location preds.
Args:
        threshold: (float) The overlap threshold used when matching boxes.
truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
variances: (tensor) Variances corresponding to each prior coord,
Shape: [num_priors, 4].
labels: (tensor) All the class labels for the image, Shape: [num_obj].
        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
idx: (int) current batch index
Return:
The matched indices corresponding to 1)location and 2)confidence preds.
"""
# jaccard index
overlaps = jaccard(
truths,
point_form(priors)
)
# (Bipartite Matching)
# [1,num_objects] best prior for each ground truth
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
# [1,num_priors] best ground truth for each prior
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior
# TODO refactor: index best_prior_idx with long tensor
# ensure every gt matches with its prior of max overlap
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
conf[best_truth_overlap < threshold] = 0 # label as background
loc = encode(matches, priors, variances)
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
conf_t[idx] = conf # [num_priors] top class label for each prior
def encode(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
"""
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
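# Illustrative note (not in the original): decode() inverts encode(), i.e.
# decode(encode(matched, priors, variances), priors, variances) recovers the
# matched boxes in (xmin, ymin, xmax, ymax) form, up to floating point error.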
def log_sum_exp(x):
    """Utility function for computing log_sum_exp in a numerically stable way.
    This will be used to determine unaveraged confidence loss across
    all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape: [num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
"""
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
union = (rem_areas - inter) + area[i]
IoU = inter/union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
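# Illustrative usage sketch (not part of the original module): with two heavily
# overlapping boxes and one distant box, nms keeps the highest-scoring box of
# the overlapping pair plus the distant one (expected indices: 0 and 2).
def _example_nms():
    boxes = torch.tensor([[0.00, 0.00, 1.0, 1.0],
                          [0.05, 0.05, 1.0, 1.0],
                          [2.00, 2.00, 3.0, 3.0]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep, count = nms(boxes, scores, overlap=0.5, top_k=200)
    return keep[:count]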
|
mit
| 5,635,319,637,972,045,000
| 38.443515
| 80
| 0.594463
| false
| 3.229531
| false
| false
| false
|
opendatatrentino/ckan-api-client
|
ckan_api_client/tests/unit/test_utils_diff.py
|
1
|
1092
|
from ckan_api_client.tests.utils.diff import diff_mappings, diff_sequences
def test_diff_dicts():
dct1 = {
'one': 'VAL-1',
'two': 'VAL-2',
'three': 'VAL-3',
'four': 'VAL-4',
'five': 'VAL-5',
}
dct2 = {
'three': 'VAL-3',
'four': 'VAL-4-2',
'five': 'VAL-5-2',
'six': 'VAL-6',
'seven': 'VAL-7',
'eight': 'VAL-8',
}
diff = diff_mappings(dct1, dct2)
assert diff['common'] == set(['three', 'four', 'five'])
assert diff['left'] == set(['one', 'two'])
assert diff['right'] == set(['six', 'seven', 'eight'])
assert diff['differing'] == set(['four', 'five'])
def test_diff_sequences():
diff = diff_sequences([1, 2, 3], [1, 2, 9])
assert diff['length_match'] is True
assert diff['differing'] == set([2])
diff = diff_sequences([1, 2], [])
assert diff['length_match'] is False
assert diff['differing'] == set()
diff = diff_sequences([0, 0, 0, 0], [0, 1, 0, 1])
assert diff['length_match'] is True
assert diff['differing'] == set([1, 3])
|
bsd-2-clause
| 2,361,998,526,642,072,000
| 26.3
| 74
| 0.508242
| false
| 3.041783
| false
| false
| false
|
hyperreal/GanjaBot
|
magnet_utils.py
|
1
|
4242
|
#
# This file is part of Magnet2.
# Copyright (c) 2011 Grom PE
#
# Magnet2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Magnet2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Magnet2. If not, see <http://www.gnu.org/licenses/>.
#
import xmpp, os, time, cPickle
try:
from HTMLParser import HTMLParser
htmlparser_available = True
except:
htmlparser_available = False
def iq_set_affiliation(room, nick, affiliation, reason=None):
iq = xmpp.Iq('set', xmpp.NS_MUC_ADMIN, {}, room)
item = iq.getTag('query').setTag('item')
item.setAttr('nick', nick)
item.setAttr('affiliation', affiliation)
if reason: item.addChild('reason', {}, reason)
return iq
def iq_set_role(room, nick, role, reason=None):
iq = xmpp.Iq('set', xmpp.NS_MUC_ADMIN, {}, room)
item = iq.getTag('query').setTag('item')
item.setAttr('nick', nick)
item.setAttr('role', role)
if reason: item.addChild('reason', {}, reason)
return iq
def serialize(fname, data):
f = open(fname, 'wb')
result = cPickle.dump(data, f, 2)
f.close()
return result
def unserialize(fname):
if not os.path.exists(fname): return False
f = open(fname, 'rb')
result = cPickle.load(f)
f.close()
return result
def writelog(filename, text):
s = '[%s] %s\n'%(time.strftime('%d %b %Y %H:%M:%S'), text)
f = open(filename, 'a')
f.write(s.encode('utf-8'))
f.close()
def hasbadwords(text):
badwords = ['palenie.cz', 'paleniecz', 'fakeweed', 'gbac', 'odczynnikchemiczne', 'rcsafe', 'GRC', 'genuine research chemicals', 'genuine', 'geniue', 'genuineresearchchemicals', 'genuine-rc.nl', 'befree', 'befreerc', 'b3', 'officialbenzofury', 'ucygana', 'rcwolf.nl', 'rcwolf', 'black-chem', 'blackchem', 'ksero24', 'pv8.nl', 'brainwasher', 'r-c.com', 'gamma cleaner', 'gammacleaner', 'eurochemicalsco', 'hajsenberg', 'topone', 'chemiczni.eu', 'how-high', 'legalchem', 'legalchem.pl', 'designerchemical', 'odczynniki.cz', 'legalne-ziola', 'synthetics.pl', 'coolchem', 'rcforyou.net', 'rc4you', 'rcforyou', 'rcchemicals', 'mefedron.pl', 'bazarr.nl', 'bazarr', 'fakehash.pl', 'stymulab', 'paularc', 'fakeshop', 'get-rc', 'peakowski', 'r-c', 'rc.pl', 'giene', 'gienk', 'kolekcjoner.nl', 'kolekcjonernl', 'gblchrom']
textl = text.replace(u'\xad', '').lower()
for word in badwords:
if word in textl: return True
return False
def unhtml(content):
    if htmlparser_available:
        return HTMLParser().unescape(content)
    content = content.replace('&lt;', '<')
    content = content.replace('&gt;', '>')
    content = content.replace('&quot;', '"')
    content = content.replace('&#39;', "'")
    return content.replace('&amp;', '&')
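# Illustrative note (not in the original):
# unhtml('Tom &amp; Jerry &lt;3') -> 'Tom & Jerry <3'
# (the manual replacements above are only a fallback for when HTMLParser is unavailable).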
def timeformat(s):
s = s//1 # Rounding
days = s//86400
s -= days*86400
hours = s//3600
s -= hours*3600
minutes = s//60
s -= minutes*60
result = ''
limit = 0
if days>0:
result += ' %d day%s'%(days, ('', 's')[days>1])
limit = 2
if hours>0:
result += ' %d hour%s'%(hours, ('', 's')[hours>1])
limit += 1
if limit<2 and minutes>0:
result += ' %d minute%s'%(minutes, ('', 's')[minutes>1])
if limit<1 and s>0:
result += ' %d second%s'%(s, ('', 's')[s>1])
return result[1:]
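# Illustrative note (not in the original): timeformat(45) -> '45 seconds',
# timeformat(3661) -> '1 hour 1 minute', timeformat(90061) -> '1 day 1 hour'.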
def separate_target_reason(bot, room, parameters):
target = parameters
reason = None
if not target in bot.roster[room]:
p = len(parameters)
while True:
p = parameters.rfind(' ', 0, p)
if p == -1:
if parameters.find(' ') != -1:
(target, reason) = parameters.split(' ', 1)
break
if parameters[:p] in bot.roster[room]:
target = parameters[:p]
reason = parameters[p+1:]
break
return (target, reason)
def force_directory(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname, 0755)
if __name__ == "__main__":
pass
|
gpl-3.0
| -5,035,402,395,565,944,000
| 34.057851
| 813
| 0.635078
| false
| 2.991537
| false
| false
| false
|
map0logo/hmm_tagging
|
bigram_tagging.py
|
1
|
10176
|
"""
Implementation of a bigram part-of-speech (POS) tagger based on first-order hidden
Markov models from scratch.
"""
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import codecs
POS_UNIVERSAL = ('VERB', 'NOUN', 'PRON', 'ADJ', 'ADV', 'ADP',
'CONJ', 'DET', 'NUM', 'PRT', 'X', '.')
POS_STATES = np.array(POS_UNIVERSAL)
def viterbi(i_obs, i_states, lstart_p, ltrans_p, lemit_p):
"""
Return the best path, given an HMM model and a sequence of observations
:param i_obs: index of observations in obs_states
:param i_states: index of states
    :param lstart_p: 2D array of log initial probabilities (requires explicit reshape)
    :param ltrans_p: 2D array of log transition probabilities
    :param lemit_p: 2D array of log emission probabilities
:return:
    best_path: 1D array of the best corresponding hidden state for each observation
(internal, published for debugging)
path: 2D array of best state for each step and hidden state
logV: 2D array of best log probability for each step and state
"""
""""""
n_obs = i_obs.size
n_states = i_states.size # number of states
logV = np.zeros((n_states, n_obs)) # initialise viterbi table
path = np.zeros((n_states, n_obs), dtype=np.int) # initialise the best path table
best_path = np.zeros(n_obs, dtype=np.int) # this will be your output
# B- base case
logV[:, [0]] = lstart_p + lemit_p[:, [i_obs[0]]]
path[:, 0] = i_states
# C- Inductive case
for t in xrange(1, n_obs): # loop through time
for s in xrange(0, n_states): # loop through the states @(t-1)
tp = logV[:, t-1] + ltrans_p[:, s] + lemit_p[s, i_obs[t]]
path[s, t], logV[s, t] = tp.argmax(), tp.max()
# D - Backpoint
best_path[n_obs - 1] = logV[:, n_obs - 1].argmax() # last state
for t in xrange(n_obs - 1, 0, -1): # states of (last-1)th to 0th time step
best_path[t - 1] = path[best_path[t], t]
return best_path, path, logV
def read_corpus(file_id):
"""
Read a corpus in a CLL file format with "words" and "pos" columns
:param file_id:
:return:
"""
f = open(file_id)
lines = f.readlines()
f.close()
words = [] # List of words in corpus
tags = [] # List of tags corresponding to each word
n_sents = 0 # Sentences are separated by a empty string
sents = [[]] # List of sentences. Each sentence is a list of words
t_sents = [[]] # List of corresponding tags for each word in sentences.
for line in lines:
split = line.split()
if len(split) == 2:
words.append(split[0])
tags.append(split[1])
sents[n_sents].append(split[0])
t_sents[n_sents].append(split[1])
else:
if sents[n_sents] != []:
n_sents += 1
sents.append([])
t_sents.append([])
words = np.array(words)
tags = np.array(tags)
if sents[-1] == []:
sents = sents[:-1]
t_sents = t_sents[:-1]
sents = np.array(sents)
t_sents = np.array(t_sents)
return words, tags, sents, t_sents
def read_words(file_id):
"""
Read a corpus in a CLL file format with only "words" column
:param file_id:
:return:
"""
f = open(file_id)
lines = f.readlines()
f.close()
words = []
n_sents = 0
sents = [[]]
for line in lines:
line = line.strip()
if line:
words.append(line)
sents[n_sents].append(line)
else:
if sents[n_sents] != []:
n_sents += 1
sents.append([])
words = np.array(words)
if sents[-1] == []:
sents = sents[:-1]
sents = np.array(sents)
return words, sents
def write_corpus(file_id, sents, t_sents):
"""
Writes a Corpus in CLL file format, with "words" and "pos" columns.
Inserts a empty line between sentences.
:param file_id:
:return:
"""
f = codecs.open(file_id, "w", encoding='utf-8')
for i, sent in enumerate(sents):
for j, word in enumerate(sent):
f.write("{}\t{}\n".format(word.decode('utf-8'), t_sents[i][j]))
f.write("\n")
f.close()
def where_in_states(values, states):
"""
Return a flat array of indexes of occurrences of values array in
states array.
:param values:
:param states:
:return:
"""
return np.array([np.where(states == i) for i in values]).flatten()
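# Illustrative note (not in the original):
# where_in_states(np.array(['C', 'A']), np.array(['A', 'C', 'G', 'T'])) -> array([1, 0])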
def testing_viterbi():
"""
Example taken from Borodovsky & Ekisheva (2006), pp 80-81
:return:
"""
states = np.array(['H', 'L'])
i_states = np.arange(0, states.size)
obs = np.array(['G', 'G', 'C', 'A', 'C', 'T', 'G', 'A', 'A'])
obs_states = np.array(['A', 'C', 'G', 'T'])
i_obs = where_in_states(obs, obs_states)
start_p = np.array([0.5, 0.5]).reshape((states.size, 1))
trans_p = np.array([[0.5, 0.5],
[0.4, 0.6]])
emit_p = np.array([[0.2, 0.3, 0.3, 0.2],
[0.3, 0.2, 0.2, 0.3]])
lstart_p = np.log(start_p)
ltrans_p = np.log(trans_p)
lemit_p = np.log(emit_p)
best_path, path, logV = viterbi(i_obs, i_states, lstart_p, ltrans_p, lemit_p)
print(states[best_path])
print(states[path])
print(logV)
def bigrams(array):
"""
Returns an array of bigrams given a 1D array of words or tags.
:param array:
:return:
"""
return np.array([(array[i:i+2]) for i in xrange(len(array) - 1)])
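# Illustrative note (not in the original):
# bigrams(np.array(['DET', 'NOUN', 'VERB'])) -> array([['DET', 'NOUN'], ['NOUN', 'VERB']]),
# i.e. every pair of consecutive tags, which is what train() counts to build trans_f.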
def train(file_id):
"""
    Estimate HMM model parameters using the maximum likelihood method, i.e.
    by calculating relative frequency distributions.
:param file_id: tagged corpus file in CLL format
:return:
start_p: frequency of tags of first word in each sentence.
array POS_STATES.size
trans_p: frequency of tags from one state to another for each bigram.
matrix POS_STATES.size x POS_STATES.size
emit_p: frequency of words for each tag.
matrix POS_STATES.size x unique_words.size
unique_words: array of unique words in corpus
"""
# read corpus data
words, tags, sents, t_sents = read_corpus(file_id)
t_bigrams = bigrams(tags)
# Calculate frequency of tags of first word in each sentence.
t_first = [t_sent[0] for t_sent in t_sents]
start_f = np.zeros(POS_STATES.size, dtype=np.int)
start_f = pd.DataFrame(start_f)
start_f.index = POS_STATES
for tag in t_first:
start_f.loc[tag, 0] += 1
# Calculate frequency between states in bigrams
trans_f = np.zeros((POS_STATES.size, POS_STATES.size), dtype=np.int)
trans_f = pd.DataFrame(trans_f)
trans_f.index = POS_STATES
trans_f.columns = POS_STATES
for i, j in t_bigrams:
trans_f.loc[i, j] += 1
# Calculate frequency of each word by tag
unique_words = np.unique(words)
emit_f = np.zeros((POS_STATES.size, unique_words.size), dtype=np.int)
emit_f = pd.DataFrame(emit_f)
emit_f.index = POS_STATES
emit_f.columns = unique_words
for tag, word in zip(tags, words):
emit_f.loc[tag, word] += 1
return start_f.values, trans_f.values, emit_f.values, unique_words
def freq2prob(start_f, trans_f, emit_f):
"""
Convert frequencies in probabilities
:param start_f:
:param trans_f:
:param emit_f:
:return:
"""
start_p = np.zeros(start_f.shape)
start_p = start_f / sum(start_f)
trans_p = np.zeros(trans_f.shape)
for i in xrange(POS_STATES.size):
trans_p[i, :] = trans_f[i, :] / np.sum(trans_f[i, :])
emit_p = np.zeros(emit_f.shape)
for i in xrange(POS_STATES.size):
emit_p[i, :] = emit_f[i, :] / np.sum(emit_f[i, :])
return start_p, trans_p, emit_p
def generate_model(file_id, model_id):
"""
Estimate model form data given in file_id, and save parameters in
model_id file.
:return:
"""
start_f, trans_f, emit_f, obs_states = train(file_id)
np.savez(model_id, start_f=start_f, trans_f=trans_f,
emit_f=emit_f, states=POS_STATES, obs_states=obs_states)
def add_one_smoothing(emit_f, obs_states, words):
"""
    Assign a frequency of one to each new word that didn't appear in the
    training data.
:param emit_p:
:param obs_states:
:param: words
:return:
"""
new_words = []
for word in words:
if not(word in obs_states) and not(word in new_words):
new_words.append(word)
obs_states = np.append(obs_states, new_words)
new_words_f = np.zeros((emit_f.shape[0], len(new_words)))
emit_f = np.append(emit_f, new_words_f, axis=1)
emit_f += 1 # Add one!
return emit_f, obs_states
def load_model(model_id):
"""
:param model_id:
:return:
"""
model = np.load("{}.npz".format(model_id))
start_f = model["start_f"]
trans_f = model["trans_f"]
emit_f = model["emit_f"]
obs_states = model["obs_states"]
return start_f, trans_f, emit_f, obs_states
def evaluate_model(file_id, start_f, trans_f, emit_f, obs_states, smooth):
"""
    Evaluate the HMM model given by its frequency tables on the corpus in
    file_id and return the tagged sentences.
    :param file_id: eval corpus file in CLL format, without tags
    :param start_f, trans_f, emit_f: model frequency tables, as returned by load_model
    :param obs_states: array of known words (observation states)
    :param smooth: smoothing function, e.g. add_one_smoothing
    :return:
    sents, t_sents: the sentences read from file_id and their predicted tag sequences
"""
words, sents = read_words(file_id)
i_states = np.arange(0, POS_STATES.size)
emit_f, obs_states = smooth(emit_f, obs_states, words)
start_p, trans_p, emit_p = freq2prob(start_f, trans_f, emit_f)
lstart_p = np.log(start_p.reshape((start_p.size, 1)))
ltrans_p = np.log(trans_p)
lemit_p = np.log(emit_p)
# For each sentence as observations, obtain tags using viterbi
t_sents = []
for sent in sents:
i_obs = where_in_states(sent, obs_states)
best_path, path, logV = viterbi(i_obs, i_states,
lstart_p, ltrans_p, lemit_p)
t_sents.append(POS_STATES[best_path].tolist())
return sents, t_sents
|
gpl-2.0
| -3,219,382,188,687,192,000
| 30.214724
| 86
| 0.592571
| false
| 3.085506
| false
| false
| false
|
alvinwan/tex2py
|
setup.py
|
1
|
1510
|
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
install_requires = ['TexSoup==0.1.4', 'pptree==2.0']
tests_require = ['pytest', 'pytest-cov==2.5.1', 'coverage == 3.7.1', 'coveralls == 1.1']
# hack
install_requires = install_requires + tests_require
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
VERSION = '0.0.5'
setup(
name = "tex2py",
version = VERSION,
author = "Alvin Wan",
author_email = 'hi@alvinwan.com',
description = ("utility converting latex into parse tree in Python"),
license = "BSD",
url = "http://github.com/alvinwan/tex2py",
packages = ['tex2py'],
cmdclass = {'test': PyTest},
tests_require = tests_require,
install_requires = install_requires + tests_require,
download_url = 'https://github.com/alvinwan/tex2py/archive/%s.zip' % VERSION,
classifiers = [
"Topic :: Utilities",
"Topic :: Utilities",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
],
)
|
bsd-2-clause
| -4,880,245,095,279,423,000
| 29.816327
| 88
| 0.635762
| false
| 3.61244
| true
| false
| false
|
pepetreshere/odoo
|
addons/website/models/website_visitor.py
|
2
|
15826
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import uuid
import pytz
from odoo import fields, models, api, _
from odoo.addons.base.models.res_partner import _tz_get
from odoo.exceptions import UserError
from odoo.tools.misc import _format_time_ago
from odoo.http import request
from odoo.osv import expression
class WebsiteTrack(models.Model):
_name = 'website.track'
_description = 'Visited Pages'
_order = 'visit_datetime DESC'
_log_access = False
visitor_id = fields.Many2one('website.visitor', ondelete="cascade", index=True, required=True, readonly=True)
page_id = fields.Many2one('website.page', index=True, ondelete='cascade', readonly=True)
url = fields.Text('Url', index=True)
visit_datetime = fields.Datetime('Visit Date', default=fields.Datetime.now, required=True, readonly=True)
class WebsiteVisitor(models.Model):
_name = 'website.visitor'
_description = 'Website Visitor'
_order = 'last_connection_datetime DESC'
name = fields.Char('Name')
access_token = fields.Char(required=True, default=lambda x: uuid.uuid4().hex, index=False, copy=False, groups='base.group_website_publisher')
active = fields.Boolean('Active', default=True)
website_id = fields.Many2one('website', "Website", readonly=True)
partner_id = fields.Many2one('res.partner', string="Linked Partner", help="Partner of the last logged in user.")
partner_image = fields.Binary(related='partner_id.image_1920')
# localisation and info
country_id = fields.Many2one('res.country', 'Country', readonly=True)
country_flag = fields.Char(related="country_id.image_url", string="Country Flag")
lang_id = fields.Many2one('res.lang', string='Language', help="Language from the website when visitor has been created")
timezone = fields.Selection(_tz_get, string='Timezone')
email = fields.Char(string='Email', compute='_compute_email_phone')
mobile = fields.Char(string='Mobile Phone', compute='_compute_email_phone')
# Visit fields
visit_count = fields.Integer('Number of visits', default=1, readonly=True, help="A new visit is considered if last connection was more than 8 hours ago.")
website_track_ids = fields.One2many('website.track', 'visitor_id', string='Visited Pages History', readonly=True)
visitor_page_count = fields.Integer('Page Views', compute="_compute_page_statistics", help="Total number of visits on tracked pages")
page_ids = fields.Many2many('website.page', string="Visited Pages", compute="_compute_page_statistics")
page_count = fields.Integer('# Visited Pages', compute="_compute_page_statistics", help="Total number of tracked page visited")
last_visited_page_id = fields.Many2one('website.page', string="Last Visited Page", compute="_compute_last_visited_page_id")
# Time fields
create_date = fields.Datetime('First connection date', readonly=True)
last_connection_datetime = fields.Datetime('Last Connection', default=fields.Datetime.now, help="Last page view date", readonly=True)
time_since_last_action = fields.Char('Last action', compute="_compute_time_statistics", help='Time since last page view. E.g.: 2 minutes ago')
is_connected = fields.Boolean('Is connected ?', compute='_compute_time_statistics', help='A visitor is considered as connected if his last page view was within the last 5 minutes.')
_sql_constraints = [
('access_token_unique', 'unique(access_token)', 'Access token should be unique.'),
('partner_uniq', 'unique(partner_id)', 'A partner is linked to only one visitor.'),
]
@api.depends('name')
def name_get(self):
return [(
record.id,
(record.name or _('Website Visitor #%s', record.id))
) for record in self]
@api.depends('partner_id.email_normalized', 'partner_id.mobile', 'partner_id.phone')
def _compute_email_phone(self):
results = self.env['res.partner'].search_read(
[('id', 'in', self.partner_id.ids)],
['id', 'email_normalized', 'mobile', 'phone'],
)
mapped_data = {
result['id']: {
'email_normalized': result['email_normalized'],
'mobile': result['mobile'] if result['mobile'] else result['phone']
} for result in results
}
for visitor in self:
visitor.email = mapped_data.get(visitor.partner_id.id, {}).get('email_normalized')
visitor.mobile = mapped_data.get(visitor.partner_id.id, {}).get('mobile')
@api.depends('website_track_ids')
def _compute_page_statistics(self):
results = self.env['website.track'].read_group(
[('visitor_id', 'in', self.ids), ('url', '!=', False)], ['visitor_id', 'page_id', 'url'], ['visitor_id', 'page_id', 'url'], lazy=False)
mapped_data = {}
for result in results:
visitor_info = mapped_data.get(result['visitor_id'][0], {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor_info['visitor_page_count'] += result['__count']
visitor_info['page_count'] += 1
if result['page_id']:
visitor_info['page_ids'].add(result['page_id'][0])
mapped_data[result['visitor_id'][0]] = visitor_info
for visitor in self:
visitor_info = mapped_data.get(visitor.id, {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor.page_ids = [(6, 0, visitor_info['page_ids'])]
visitor.visitor_page_count = visitor_info['visitor_page_count']
visitor.page_count = visitor_info['page_count']
@api.depends('website_track_ids.page_id')
def _compute_last_visited_page_id(self):
results = self.env['website.track'].read_group([('visitor_id', 'in', self.ids)],
['visitor_id', 'page_id', 'visit_datetime:max'],
['visitor_id', 'page_id'], lazy=False)
mapped_data = {result['visitor_id'][0]: result['page_id'][0] for result in results if result['page_id']}
for visitor in self:
visitor.last_visited_page_id = mapped_data.get(visitor.id, False)
@api.depends('last_connection_datetime')
def _compute_time_statistics(self):
for visitor in self:
visitor.time_since_last_action = _format_time_ago(self.env, (datetime.now() - visitor.last_connection_datetime))
visitor.is_connected = (datetime.now() - visitor.last_connection_datetime) < timedelta(minutes=5)
def _check_for_message_composer(self):
""" Purpose of this method is to actualize visitor model prior to contacting
him. Used notably for inheritance purpose, when dealing with leads that
could update the visitor model. """
return bool(self.partner_id and self.partner_id.email)
def _prepare_message_composer_context(self):
return {
'default_model': 'res.partner',
'default_res_id': self.partner_id.id,
'default_partner_ids': [self.partner_id.id],
}
def action_send_mail(self):
self.ensure_one()
if not self._check_for_message_composer():
raise UserError(_("There are no contact and/or no email linked to this visitor."))
visitor_composer_ctx = self._prepare_message_composer_context()
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
compose_ctx = dict(
default_use_template=False,
default_composition_mode='comment',
)
compose_ctx.update(**visitor_composer_ctx)
return {
'name': _('Contact Visitor'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': compose_ctx,
}
def _get_visitor_from_request(self, force_create=False):
""" Return the visitor as sudo from the request if there is a visitor_uuid cookie.
It is possible that the partner has changed or has disconnected.
            In that case the cookie still references the old visitor and needs to be replaced
            with the access token of the visitor returned by this method. """
# This function can be called in json with mobile app.
# In case of mobile app, no uid is set on the jsonRequest env.
# In case of multi db, _env is None on request, and request.env unbound.
if not request:
return None
Visitor = self.env['website.visitor'].sudo()
visitor = Visitor
access_token = request.httprequest.cookies.get('visitor_uuid')
if access_token:
visitor = Visitor.with_context(active_test=False).search([('access_token', '=', access_token)])
# Prefetch access_token and other fields. Since access_token has a restricted group and we access
            # a non-restricted field (partner_id) first, it is not prefetched and would require an additional query to retrieve.
visitor.access_token
if not self.env.user._is_public():
partner_id = self.env.user.partner_id
if not visitor or visitor.partner_id and visitor.partner_id != partner_id:
# Partner and no cookie or wrong cookie
visitor = Visitor.with_context(active_test=False).search([('partner_id', '=', partner_id.id)])
elif visitor and visitor.partner_id:
# Cookie associated to a Partner
visitor = Visitor
if visitor and not visitor.timezone:
tz = self._get_visitor_timezone()
if tz:
visitor.timezone = tz
if not visitor and force_create:
visitor = self._create_visitor()
return visitor
def _handle_webpage_dispatch(self, response, website_page):
# get visitor. Done here to avoid having to do it multiple times in case of override.
visitor_sudo = self._get_visitor_from_request(force_create=True)
if request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
expiration_date = datetime.now() + timedelta(days=365)
response.set_cookie('visitor_uuid', visitor_sudo.access_token, expires=expiration_date)
self._handle_website_page_visit(website_page, visitor_sudo)
def _handle_website_page_visit(self, website_page, visitor_sudo):
""" Called on dispatch. This will create a website.visitor if the http request object
is a tracked website page or a tracked view. Only on tracked elements to avoid having
            too many operations done on every page or other http requests.
Note: The side effect is that the last_connection_datetime is updated ONLY on tracked elements."""
url = request.httprequest.url
website_track_values = {
'url': url,
'visit_datetime': datetime.now(),
}
if website_page:
website_track_values['page_id'] = website_page.id
domain = [('page_id', '=', website_page.id)]
else:
domain = [('url', '=', url)]
visitor_sudo._add_tracking(domain, website_track_values)
if visitor_sudo.lang_id.id != request.lang.id:
visitor_sudo.write({'lang_id': request.lang.id})
def _add_tracking(self, domain, website_track_values):
""" Add the track and update the visitor"""
domain = expression.AND([domain, [('visitor_id', '=', self.id)]])
last_view = self.env['website.track'].sudo().search(domain, limit=1)
if not last_view or last_view.visit_datetime < datetime.now() - timedelta(minutes=30):
website_track_values['visitor_id'] = self.id
self.env['website.track'].create(website_track_values)
self._update_visitor_last_visit()
def _create_visitor(self):
""" Create a visitor. Tracking is added after the visitor has been created."""
country_code = request.session.get('geoip', {}).get('country_code', False)
country_id = request.env['res.country'].sudo().search([('code', '=', country_code)], limit=1).id if country_code else False
vals = {
'lang_id': request.lang.id,
'country_id': country_id,
'website_id': request.website.id,
}
tz = self._get_visitor_timezone()
if tz:
vals['timezone'] = tz
if not self.env.user._is_public():
vals['partner_id'] = self.env.user.partner_id.id
vals['name'] = self.env.user.partner_id.name
return self.sudo().create(vals)
def _link_to_partner(self, partner, update_values=None):
""" Link visitors to a partner. This method is meant to be overridden in
order to propagate, if necessary, partner information to sub records.
:param partner: partner used to link sub records;
:param update_values: optional values to update visitors to link;
"""
vals = {'name': partner.name}
if update_values:
vals.update(update_values)
self.write(vals)
def _link_to_visitor(self, target, keep_unique=True):
""" Link visitors to target visitors, because they are linked to the
same identity. Purpose is mainly to propagate partner identity to sub
        records to ease database updates and decide what to do with "duplicates".
        This method is meant to be overridden in order to implement some specific
behavior linked to sub records of duplicate management.
:param target: main visitor, target of link process;
:param keep_unique: if True, find a way to make target unique;
"""
# Link sub records of self to target partner
if target.partner_id:
self._link_to_partner(target.partner_id)
# Link sub records of self to target visitor
self.website_track_ids.write({'visitor_id': target.id})
if keep_unique:
self.unlink()
return target
def _cron_archive_visitors(self):
delay_days = int(self.env['ir.config_parameter'].sudo().get_param('website.visitor.live.days', 30))
deadline = datetime.now() - timedelta(days=delay_days)
visitors_to_archive = self.env['website.visitor'].sudo().search([('last_connection_datetime', '<', deadline)])
visitors_to_archive.write({'active': False})
def _update_visitor_last_visit(self):
""" We need to do this part here to avoid concurrent updates error. """
try:
with self.env.cr.savepoint():
query_lock = "SELECT * FROM website_visitor where id = %s FOR NO KEY UPDATE NOWAIT"
self.env.cr.execute(query_lock, (self.id,), log_exceptions=False)
date_now = datetime.now()
query = "UPDATE website_visitor SET "
if self.last_connection_datetime < (date_now - timedelta(hours=8)):
query += "visit_count = visit_count + 1,"
query += """
active = True,
last_connection_datetime = %s
WHERE id = %s
"""
self.env.cr.execute(query, (date_now, self.id), log_exceptions=False)
except Exception:
pass
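    # Note on the locking strategy used above: the SELECT ... FOR NO KEY UPDATE NOWAIT
    # takes a row-level lock inside a savepoint. NOWAIT makes PostgreSQL raise
    # immediately if another transaction already holds the lock, and the broad
    # `except Exception: pass` then simply skips this last-visit update instead of
    # blocking or failing the current request.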
def _get_visitor_timezone(self):
tz = request.httprequest.cookies.get('tz') if request else None
if tz in pytz.all_timezones:
return tz
elif not self.env.user._is_public():
return self.env.user.tz
else:
return None
|
agpl-3.0
| -45,463,958,355,934,510
| 48.611285
| 185
| 0.623784
| false
| 4.012677
| false
| false
| false
|
marco-mariotti/selenoprofiles
|
libraries/networkx/algorithms/flow/mincost.py
|
1
|
26221
|
# -*- coding: utf-8 -*-
"""
Minimum cost flow algorithms on directed connected graphs.
"""
__author__ = """Loïc Séguin-C. <loicseguin@gmail.com>"""
# Copyright (C) 2010 Loïc Séguin-C. <loicseguin@gmail.com>
# All rights reserved.
# BSD license.
__all__ = ['network_simplex',
'min_cost_flow_cost',
'min_cost_flow',
'cost_of_flow',
'max_flow_min_cost']
import networkx as nx
def _initial_tree_solution(G, r, demand = 'demand', weight = 'weight'):
"""Find a initial tree solution rooted at r.
The initial tree solution is obtained by considering edges (r, v)
for all nodes v with non-negative demand and (v, r) for all nodes
with negative demand. If these edges do not exist, we add them to
the graph and call them artificial edges.
"""
H = nx.DiGraph(G)
T = nx.DiGraph()
y = {r: 0}
artificialEdges = []
flowCost = 0
n = G.number_of_nodes()
try:
maxWeight = max(abs(d[weight]) for u, v, d in G.edges(data = True)
if weight in d)
except ValueError:
maxWeight = 0
hugeWeight = 1 + n * maxWeight
for v, d in G.nodes(data = True)[1:]:
vDemand = d.get(demand, 0)
if vDemand >= 0:
if not (r, v) in G.edges():
H.add_edge(r, v, {weight: hugeWeight, 'flow': vDemand})
artificialEdges.append((r, v))
else: # (r, v) in G.edges()
H[r][v]['flow'] = vDemand
y[v] = H[r][v].get(weight, 0)
T.add_edge(r, v)
flowCost += vDemand * H[r][v].get(weight, 0)
else: # vDemand < 0
if not (v, r) in G.edges():
H.add_edge(v, r, {weight: hugeWeight, 'flow': -vDemand})
artificialEdges.append((v, r))
else:
H[v][r]['flow'] = -vDemand
y[v] = -H[v][r].get(weight, 0)
T.add_edge(v, r)
flowCost += -vDemand * H[v][r].get(weight, 0)
return H, T, y, artificialEdges, flowCost
def _find_entering_edge(H, c, capacity = 'capacity'):
"""Find an edge which creates a negative cost cycle in the actual
tree solution.
The reduced cost of every edge gives the value of the cycle
obtained by adding that edge to the tree solution. If that value is
negative, we will augment the flow in the direction indicated by
the edge. Otherwise, we will augment the flow in the reverse
direction.
    If no edge is found, return an empty tuple. This will cause the
main loop of the algorithm to terminate.
"""
newEdge = ()
for u, v, d in H.edges_iter(data = True):
if d.get('flow', 0) == 0:
if c[(u, v)] < 0:
newEdge = (u, v)
break
else:
if capacity in d:
if (d.get('flow', 0) == d[capacity]
and c[(u, v)] > 0):
newEdge = (u, v)
break
return newEdge
def _find_leaving_edge(H, T, cycle, newEdge, capacity = 'capacity'):
"""Find an edge that will leave the basis and the value by which we
can increase or decrease the flow on that edge.
The leaving arc rule is used to prevent cycling.
If cycle has no reverse edge and no forward edge of finite
capacity, it means that cycle is a negative cost infinite capacity
cycle. This implies that the cost of a flow satisfying all demands
is unbounded below. An exception is raised in this case.
"""
eps = False
leavingEdge = ()
# Find the forward edge with the minimum value for capacity - 'flow'
# and the reverse edge with the minimum value for 'flow'.
for index, u in enumerate(cycle[:-1]):
edgeCapacity = False
edge = ()
v = cycle[index + 1]
if (u, v) in T.edges() + [newEdge]: #forward edge
if capacity in H[u][v]: # edge (u, v) has finite capacity
edgeCapacity = H[u][v][capacity] - H[u][v].get('flow', 0)
edge = (u, v)
else: #reverse edge
edgeCapacity = H[v][u].get('flow', 0)
edge = (v, u)
# Determine if edge might be the leaving edge.
if edge:
if leavingEdge:
if edgeCapacity < eps:
eps = edgeCapacity
leavingEdge = edge
else:
eps = edgeCapacity
leavingEdge = edge
if not leavingEdge:
raise nx.NetworkXUnbounded(
"Negative cost cycle of infinite capacity found. "
+ "Min cost flow unbounded below.")
return leavingEdge, eps
def _create_flow_dict(G):
"""Creates the flow dict of dicts of graph G."""
flowDict = {}
for u in G.nodes_iter():
if not u in flowDict:
flowDict[u] = {}
for v in G.neighbors(u):
flowDict[u][v] = G[u][v].get('flow', 0)
return flowDict
def network_simplex(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Find a minimum cost flow satisfying all demands in digraph G.
This is a primal network simplex algorithm that uses the leaving
arc rule to prevent cycling.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowCost: integer, float
Cost of a minimum cost flow satisfying all demands.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
          * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost, flowDict = nx.network_simplex(G)
>>> flowCost
24
>>> flowDict
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
The mincost flow algorithm can also be used to solve shortest path
problems. To find the shortest path between two nodes u and v,
give all edges an infinite capacity, give node u a demand of -1 and
    node v a demand of 1. Then run the network simplex. The value of a
min cost flow will be the distance between u and v and edges
carrying positive flow will indicate the path.
>>> G=nx.DiGraph()
>>> G.add_weighted_edges_from([('s','u',10), ('s','x',5),
... ('u','v',1), ('u','x',2),
... ('v','y',1), ('x','u',3),
... ('x','v',5), ('x','y',2),
... ('y','s',7), ('y','v',6)])
>>> G.add_node('s', demand = -1)
>>> G.add_node('v', demand = 1)
>>> flowCost, flowDict = nx.network_simplex(G)
>>> flowCost == nx.shortest_path_length(G, 's', 'v', weighted = True)
True
>>> [(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0]
[('x', 'u'), ('s', 'x'), ('u', 'v')]
>>> nx.shortest_path(G, 's', 'v', weighted = True)
['s', 'x', 'u', 'v']
It is possible to change the name of the attributes used for the
algorithm.
>>> G = nx.DiGraph()
>>> G.add_node('p', spam = -4)
>>> G.add_node('q', spam = 2)
>>> G.add_node('a', spam = -2)
>>> G.add_node('d', spam = -1)
>>> G.add_node('t', spam = 2)
>>> G.add_node('w', spam = 3)
>>> G.add_edge('p', 'q', cost = 7, vacancies = 5)
>>> G.add_edge('p', 'a', cost = 1, vacancies = 4)
>>> G.add_edge('q', 'd', cost = 2, vacancies = 3)
>>> G.add_edge('t', 'q', cost = 1, vacancies = 2)
>>> G.add_edge('a', 't', cost = 2, vacancies = 4)
>>> G.add_edge('d', 'w', cost = 3, vacancies = 4)
>>> G.add_edge('t', 'w', cost = 4, vacancies = 1)
>>> flowCost, flowDict = nx.network_simplex(G, demand = 'spam',
... capacity = 'vacancies',
... weight = 'cost')
>>> flowCost
37
>>> flowDict
{'a': {'t': 4}, 'd': {'w': 2}, 'q': {'d': 1}, 'p': {'q': 2, 'a': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
References
----------
W. J. Cook, W. H. Cunningham, W. R. Pulleyblank and A. Schrijver.
Combinatorial Optimization. Wiley-Interscience, 1998.
"""
if not G.is_directed():
raise nx.NetworkXError("Undirected graph not supported (yet).")
if not nx.is_connected(G.to_undirected()):
raise nx.NetworkXError("Not connected graph not supported (yet).")
if sum(d[demand] for v, d in G.nodes(data = True)
if demand in d) != 0:
raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
# Fix an arbitrarily chosen root node and find an initial tree solution.
r = G.nodes()[0]
H, T, y, artificialEdges, flowCost = \
_initial_tree_solution(G, r, demand = demand, weight = weight)
# Initialize the reduced costs.
c = {}
for u, v, d in H.edges_iter(data = True):
c[(u, v)] = d.get(weight, 0) + y[u] - y[v]
# Print stuff for debugging.
# print('-' * 78)
# nbIter = 0
# print('Iteration %d' % nbIter)
# nbIter += 1
# print('Tree solution: %s' % T.edges())
# print(' Edge %11s%10s' % ('Flow', 'Red Cost'))
# for u, v, d in H.edges(data = True):
# flag = ''
# if (u, v) in artificialEdges:
# flag = '*'
# print('(%s, %s)%1s%10d%10d' % (u, v, flag, d.get('flow', 0),
# c[(u, v)]))
# print('Distances: %s' % y)
# Main loop.
while True:
newEdge = _find_entering_edge(H, c, capacity = capacity)
if not newEdge:
break # Optimal basis found. Main loop is over.
cycleCost = abs(c[newEdge])
# Find the cycle created by adding newEdge to T.
path1 = nx.shortest_path(T.to_undirected(), r, newEdge[0])
path2 = nx.shortest_path(T.to_undirected(), r, newEdge[1])
join = r
for index, node in enumerate(path1[1:]):
if index + 1 < len(path2) and node == path2[index + 1]:
join = node
else:
break
path1 = path1[path1.index(join):]
path2 = path2[path2.index(join):]
cycle = []
if H[newEdge[0]][newEdge[1]].get('flow', 0) == 0:
path2.reverse()
cycle = path1 + path2
else: # newEdge is at capacity
path1.reverse()
cycle = path2 + path1
# Find the leaving edge. Will stop here if cycle is an infinite
# capacity negative cost cycle.
leavingEdge, eps = _find_leaving_edge(H, T, cycle, newEdge,
capacity = capacity)
# Actual augmentation happens here. If eps = 0, don't bother.
if eps:
flowCost -= cycleCost * eps
for index, u in enumerate(cycle[:-1]):
v = cycle[index + 1]
if (u, v) in T.edges() + [newEdge]:
H[u][v]['flow'] = H[u][v].get('flow', 0) + eps
else: # (v, u) in T.edges():
H[v][u]['flow'] -= eps
# Update tree solution.
T.add_edge(*newEdge)
T.remove_edge(*leavingEdge)
# Update distances and reduced costs.
if newEdge != leavingEdge:
forest = nx.DiGraph(T)
forest.remove_edge(*newEdge)
R, notR = nx.connected_component_subgraphs(forest.to_undirected())
if r in notR.nodes(): # make sure r is in R
R, notR = notR, R
if newEdge[0] in R.nodes():
for v in notR.nodes():
y[v] += c[newEdge]
else:
for v in notR.nodes():
y[v] -= c[newEdge]
for u, v in H.edges():
if u in notR.nodes() or v in notR.nodes():
c[(u, v)] = H[u][v].get(weight, 0) + y[u] - y[v]
# Print stuff for debugging.
# print('-' * 78)
# print('Iteration %d' % nbIter)
# nbIter += 1
# print('Tree solution: %s' % T.edges())
# print('New edge: (%s, %s)' % (newEdge[0], newEdge[1]))
# print('Leaving edge: (%s, %s)' % (leavingEdge[0], leavingEdge[1]))
# print('Cycle: %s' % cycle)
# print(' Edge %11s%10s' % ('Flow', 'Red Cost'))
# for u, v, d in H.edges(data = True):
# flag = ''
# if (u, v) in artificialEdges:
# flag = '*'
# print('(%s, %s)%1s%10d%10d' % (u, v, flag, d.get('flow', 0),
# c[(u, v)]))
# print('Distances: %s' % y)
# If an artificial edge has positive flow, the initial problem was
# not feasible.
for u, v in artificialEdges:
if H[u][v]['flow'] != 0:
raise nx.NetworkXUnfeasible("No flow satisfying all demands.")
H.remove_edge(u, v)
flowDict = _create_flow_dict(H)
return flowCost, flowDict
def min_cost_flow_cost(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Find the cost of a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowCost: integer, float
Cost of a minimum cost flow satisfying all demands.
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
          * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost = nx.min_cost_flow_cost(G)
>>> flowCost
24
"""
return network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[0]
def min_cost_flow(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Return a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
          * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowDict = nx.min_cost_flow(G)
>>> flowDict
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
"""
return network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[1]
def cost_of_flow(G, flowDict, weight = 'weight'):
"""Compute the cost of the flow given by flowDict on graph G.
Note that this function does not check for the validity of the
flow flowDict. This function will fail if the graph G and the
flow don't have the same edge set.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Returns
-------
cost: Integer, float
The total cost of the flow. This is given by the sum over all
edges of the product of the edge's flow and the edge's weight.
See also
--------
max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex
"""
return sum((flowDict[u][v] * d.get(weight, 0)
for u, v, d in G.edges_iter(data = True)))
def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'):
"""Return a maximum (s, t)-flow of minimum cost.
G is a digraph with edge costs and capacities. There is a source
node s and a sink node t. This function finds a maximum flow from
s to t whose total cost is minimized.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
s: node label
Source of the flow.
t: node label
Destination of the flow.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnbounded
This exception is raised if there is an infinite capacity path
from s to t in G. In this case there is no maximum flow. This
exception is also raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
is unbounded below.
See also
--------
cost_of_flow, ford_fulkerson, min_cost_flow, min_cost_flow_cost,
network_simplex
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_edges_from([(1, 2, {'capacity': 12, 'weight': 4}),
... (1, 3, {'capacity': 20, 'weight': 6}),
... (2, 3, {'capacity': 6, 'weight': -3}),
... (2, 6, {'capacity': 14, 'weight': 1}),
... (3, 4, {'weight': 9}),
... (3, 5, {'capacity': 10, 'weight': 5}),
... (4, 2, {'capacity': 19, 'weight': 13}),
... (4, 5, {'capacity': 4, 'weight': 0}),
... (5, 7, {'capacity': 28, 'weight': 2}),
... (6, 5, {'capacity': 11, 'weight': 1}),
... (6, 7, {'weight': 8}),
... (7, 4, {'capacity': 6, 'weight': 6})])
>>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
>>> nx.cost_of_flow(G, mincostFlow)
373
>>> maxFlow = nx.ford_fulkerson_flow(G, 1, 7)
>>> nx.cost_of_flow(G, maxFlow)
428
>>> mincostFlowValue = (sum((mincostFlow[u][7] for u in G.predecessors(7)))
... - sum((mincostFlow[7][v] for v in G.successors(7))))
>>> mincostFlowValue == nx.max_flow(G, 1, 7)
True
"""
maxFlow = nx.max_flow(G, s, t, capacity = capacity)
H = nx.DiGraph(G)
H.add_node(s, demand = -maxFlow)
H.add_node(t, demand = maxFlow)
return min_cost_flow(H, capacity = capacity, weight = weight)
|
gpl-2.0
| 4,067,730,387,553,061,400
| 35.821629
| 104
| 0.56265
| false
| 3.754404
| false
| false
| false
|
DavidPowell/OpenModes
|
openmodes/helpers.py
|
1
|
6055
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonators
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
import functools
import uuid
import weakref
import numpy as np
import numbers
from collections import defaultdict
import six
def inc_slice(s, inc):
"""Increment a slice so that it starts at the current stop, and the current
stop is incremented by some amount"""
return slice(s.stop, s.stop+inc)
class cached_property(object):
"""
A property that is only computed once per instance and then replaces itself
with an ordinary attribute. Deleting the attribute resets the property.
Taken from https://github.com/pydanny/cached-property/blob/master/cached_property.py
Original source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
Copyright under MIT License
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class MeshError(Exception):
"An exeception indicating a failure generating or reading the mesh"
pass
class Identified(object):
"""An object which can be uniquely identified by an id number. It is
assumed that any object which subclasses Identified is immutable, so that
its id can be used for caching complex results which depend on this object.
"""
def __init__(self):
self.id = uuid.uuid4()
def __hash__(self):
return self.id.__hash__()
def __eq__(self, other):
return hasattr(other, 'id') and (self.id == other.id)
def __repr__(self):
"Represent the object by its id, in addition to its memory address"
return ("<%s at 0x%08x with id %s>" % (str(self.__class__)[8:-2],
id(self),
str(self.id)))
class PicklableRef(object):
"""A weak reference which can be pickled. This is achieved by
creating a strong reference to the object at pickling time, then restoring
the weak reference when unpickling. Note that unless the object being
referenced is also pickled and referenced after unpickling, the weak
reference will be dead after unpickling.
"""
def __init__(self, obj, callback=None):
self.ref = weakref.ref(obj, callback)
def __call__(self):
return self.ref()
def __getstate__(self):
return {'ref': self.ref()}
def __setstate__(self, state):
self.ref = weakref.ref(state['ref'])
def memoize(obj):
"""A simple decorator to memoize function calls. Pays particular attention
to numpy arrays and objects which are subclasses of Identified. It is
assumed that in such cases, the object does not change if its `id` is the
same"""
cache = obj.cache = {}
def get_key(item):
if isinstance(item, (six.string_types, numbers.Number)):
return item
elif isinstance(item, Identified):
return str(item.id)
elif isinstance(item, np.ndarray):
return item.tostring()
else:
return str(item)
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key_arg = tuple(get_key(arg) for arg in args)
key_kwarg = tuple((kw, get_key(arg)) for (kw, arg)
in kwargs.items())
key = (key_arg, key_kwarg)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
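# Illustrative usage of `memoize` (hypothetical function, not part of this module):
#
#     @memoize
#     def greens_function(part, frequency):
#         ...  # some expensive computation
#
# numpy array arguments are keyed by their byte content and Identified subclasses
# by their immutable `id`, so repeated calls with equivalent arguments return the
# cached result without recomputation.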
def equivalence(relations):
"""Determine the equivalence classes between objects
Following numerical recipes section 8.6
Parameters
----------
relations: list
Each element of the list is a tuple containing the identities of two
equivalent items. Each item can be any hashable type
Returns
-------
class_items: list of set
        Each set contains the items that belong to one equivalence class.
"""
# first put each item in its own equivalence class
classes = {}
for j, k in relations:
classes[j] = j
classes[k] = k
for relation in relations:
j, k = relation
        # track the ancestor of each
while classes[j] != j:
j = classes[j]
while classes[k] != k:
k = classes[k]
# if not already related, then relate items
if j != k:
classes[j] = k
# The final sweep
for j in classes.keys():
while classes[j] != classes[classes[j]]:
classes[j] = classes[classes[j]]
# Now reverse the storage arrangement, so that all items of the same
# class are grouped together into a set
classes_reverse = defaultdict(set)
for item, item_class in classes.items():
classes_reverse[item_class].add(item)
# the class names are arbitrary, so just return the list of sets
return list(classes_reverse.values())
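# Illustrative example (not part of the original module):
#     equivalence([(1, 2), (2, 3), (4, 5)])
# returns the equivalence classes [{1, 2, 3}, {4, 5}] (the order of the returned
# sets is arbitrary).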
def wrap_if_constant(func):
"""If passed a constant, wrap it in a function. If passed a function, just
return it as is"""
if hasattr(func, '__call__'):
return func
else:
return lambda x: func
|
gpl-3.0
| -6,340,580,178,052,489,000
| 30.536458
| 103
| 0.61635
| false
| 4.172984
| false
| false
| false
|
drewcsillag/skunkweb
|
pylibs/skunkdoc/scanners/common.py
|
1
|
1465
|
#
# Copyright (C) 2001 Andrew T. Csillag <drew_csillag@geocities.com>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
import sys
import string
import ParseSkunkDoc
def doDocString(s):
"""**if <code>s</code> starts with '**', it contains xml markup, so don't
do anything to it (except trim off the '**', otherwise, xml escape it and
return it"""
if s is None:
return ""
# Made ** able to occur after whitespace at start of docstring
s = string.strip(s)
if s[:2] == '**':
s = s[2:]
try:
ParseSkunkDoc.parseString(s)
except:
sys.stderr.write('error parsing XML doc string %s, treating '
'as plaintext\n' % s)
s = '<pre>%s</pre>' % string.replace(plainEscape(s), '&', '&')
else:
s = '<pre>%s</pre>' % string.replace(plainEscape(s), '&', '&')
return '%s' % s
def plainEscape( s ):
'''**xml escape the string <code>s</code>'''
ns = []
for c in s:
if c == '&': ns.append('&')
elif c == '<': ns.append('<')
elif c == '>': ns.append('>')
elif c in ('\n', '\r', '\t'): ns.append(c)
elif c == '"': ns.append('"')
elif ord(c) < 32 or c > 'z': ns.append('&#%d;' % ord(c))
else: ns.append(c)
return string.join(ns, '')
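# Illustrative example (not part of the original module):
#     plainEscape('<b> & "x"')
# returns '&lt;b&gt; &amp; &quot;x&quot;'.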
|
gpl-2.0
| -243,526,687,297,408,400
| 30.847826
| 78
| 0.526962
| false
| 3.367816
| false
| false
| false
|
rave-engine/rave
|
modules/opengl/core3/texture.py
|
1
|
3655
|
from OpenGL import GL
import numpy
import ctypes
from . import shaders
class Texture:
__slots__ = ('width', 'height', 'data', 'texture')
def __init__(self, width, height, data):
self.width = width
self.height = height
self.data = data
self.texture = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA8, self.width, self.height, 0, GL.GL_BGRA, GL.GL_UNSIGNED_INT_8_8_8_8, ctypes.cast(self.data, ctypes.c_void_p))
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
def bind(self):
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)
def unbind(self):
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
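# Illustrative usage (hypothetical pixel source, not part of this module):
#     tex = Texture(width, height, pixel_buffer)  # BGRA pixels, ctypes-compatible
#     tex.bind()
#     ...  # issue draw calls that sample the texture
#     tex.unbind()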
class Image:
FRAGMENT = """
#version 330 core
in vec2 v_texcoord;
out vec4 o_color;
uniform sampler2D u_tex;
void main(void) {
o_color = texture(u_tex, v_texcoord);
}
""".strip()
VERTEX = """
#version 330 core
in vec2 a_vertex;
in vec2 a_texcoord;
out vec2 v_texcoord;
void main(void) {
gl_Position = vec4(a_vertex, 0.0, 1.0);
v_texcoord = a_texcoord;
v_texcoord.y = -v_texcoord.y;
}""".strip()
def __init__(self, tex):
self.vertexes = numpy.array([
1000.0 , -1800.0,
-1000.0 , -1800.0,
1000.0 , 1800.0,
-1000.0 ,1800.0,
-1000.0 , -1800.0,
1000.0 , 1800.0,
], dtype='float32')
self.texcoords = numpy.array([
1280.0 , -720.0,
-1280.0 , -720.0,
1280.0 , 720.0,
-1280.0 , 720.0,
-1280.0 , -720.0,
1280.0 , 720.0,
], dtype='float32')
self.tex = tex
self.program = shaders.ShaderProgram(fragment=self.FRAGMENT, vertex=self.VERTEX)
self.program.compile()
self.vao = GL.glGenVertexArrays(1)
self.vertex_vbo, self.texcoords_vbo = GL.glGenBuffers(2)
self.program.use()
GL.glBindVertexArray(self.vao)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vertex_vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(self.vertexes) * 2 * 4, self.vertexes, GL.GL_STATIC_DRAW)
GL.glEnableVertexAttribArray(self.program.get_index('a_vertex'))
GL.glVertexAttribPointer(self.program.get_index('a_vertex'), 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.texcoords_vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(self.texcoords) * 2 * 4, self.texcoords, GL.GL_STATIC_DRAW)
GL.glEnableVertexAttribArray(self.program.get_index('a_texcoord'))
GL.glVertexAttribPointer(self.program.get_index('a_texcoord'), 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
def render(self, target):
self.program.use()
self.tex.bind()
        GL.glEnable(GL.GL_BLEND)
        GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glUniform1i(self.program.get_index('u_tex'), 0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.tex.texture)
GL.glBindVertexArray(self.vao)
GL.glDrawArrays(GL.GL_TRIANGLES, 0, 6)
#GL.glBindVertexArray(0)
#GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
|
bsd-2-clause
| 4,593,637,646,586,049,500
| 33.158879
| 166
| 0.59617
| false
| 2.889328
| false
| false
| false
|
puolival/multipy
|
multipy/scripts/analyze_data.py
|
1
|
1952
|
# -*- encoding: utf-8 -*-
"""Script for analyzing data from the simulated primary and follow-up
experiments."""
# Allow importing modules from parent directory.
import sys
sys.path.append('..')
from fdr import lsu, tst, qvalue
from fwer import bonferroni, sidak, hochberg, holm_bonferroni
from permutation import tfr_permutation_test
import numpy as np
from repeat import fwer_replicability as repl
from util import grid_model_counts as counts
"""Load the simulated datasets."""
fpath = '/home/puolival/multipy_data'
fname_primary = fpath + '/primary.npy'
fname_followup = fname_primary.replace('primary', 'follow-up')
print('Loading simulated datasets ..')
primary_data, followup_data = (np.load(fname_primary),
np.load(fname_followup))
print('Done.')
# Extract p-values
pvals_pri, pvals_fol = (primary_data.flat[0]['pvals'],
followup_data.flat[0]['pvals'])
# Extract raw data for permutation testing
rvs_a_pri, rvs_b_pri = (primary_data.flat[0]['rvs_a'],
primary_data.flat[0]['rvs_b'])
rvs_a_fol, rvs_b_fol = (followup_data.flat[0]['rvs_a'],
followup_data.flat[0]['rvs_b'])
"""Define analysis parameters."""
n_iterations, n_effect_sizes, nl, _ = np.shape(pvals_pri)
emph_primary = 0.1
alpha = 0.05
method = qvalue
sl = 30 # TODO: save to .npy file.
"""Compute reproducibility rates."""
rr = np.zeros([n_iterations, n_effect_sizes])
for ind in np.ndindex(n_iterations, n_effect_sizes):
print('Analysis iteration %3d' % (1+ind[0]))
replicable = repl(pvals_pri[ind].flatten(), pvals_fol[ind].flatten(),
emph_primary, method, alpha)
replicable = np.reshape(replicable, [nl, nl])
rr[ind] = counts(replicable, nl, sl)[0]
"""Save data to disk."""
output_fpath = fpath
output_fname = output_fpath + ('/result-%s.npy' % method.__name__)
np.save(output_fname, {'rr': rr})
print('Results saved to disk.')
|
bsd-3-clause
| -2,987,823,949,373,830,000
| 30.483871
| 73
| 0.656762
| false
| 3.148387
| false
| false
| false
|
aESeguridad/GERE
|
venv/lib/python2.7/site-packages/weasyprint/layout/markers.py
|
1
|
2049
|
# coding: utf-8
"""
weasyprint.layout.markers
-------------------------
Layout for list markers (for ``display: list-item``).
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .percentages import resolve_percentages
from .replaced import image_marker_layout
from ..text import split_first_line
from ..formatting_structure import boxes
def list_marker_layout(context, box):
"""Lay out the list markers of ``box``."""
# List markers can be either 'inside' or 'outside'.
    # Inside markers are laid out just like normal inline content, but
# outside markers need specific layout.
# TODO: implement outside markers in terms of absolute positioning,
# see CSS3 lists.
marker = getattr(box, 'outside_list_marker', None)
if marker:
resolve_percentages(marker, containing_block=box)
if isinstance(marker, boxes.TextBox):
(marker.pango_layout, _, _, marker.width, marker.height,
marker.baseline) = split_first_line(
marker.text, marker.style, context.enable_hinting,
max_width=None, line_width=None)
else:
# Image marker
image_marker_layout(marker)
# Align the top of the marker box with the top of its list-item’s
# content-box.
# TODO: align the baselines of the first lines instead?
marker.position_y = box.content_box_y()
# ... and its right with the left of its list-item’s padding box.
# (Swap left and right for right-to-left text.)
marker.position_x = box.border_box_x()
half_em = 0.5 * box.style.font_size
direction = box.style.direction
if direction == 'ltr':
marker.margin_right = half_em
marker.position_x -= marker.margin_width()
else:
marker.margin_left = half_em
marker.position_x += box.border_width()
|
gpl-3.0
| 6,302,328,897,577,311,000
| 36.181818
| 78
| 0.627384
| false
| 4.025591
| false
| false
| false
|
am0d/bit
|
bit/utils.py
|
1
|
1479
|
# Utility functions for various actions
import os
import sys
import time
import shutil
import hashlib
from bit.instance import bit
from bit.cprint import error, warning
def hash(file_name):
try:
with open(file_name, 'rb') as hashable:
algo = hashlib.new(bit.options.hash_type)
algo.update(hashable.read())
return algo.hexdigest()
except IOError:
error('Could not hash: {0}'.format(file_name))
def is_exe(filepath):
return os.path.exists(filepath) and os.access(filepath, os.X_OK)
def which(program_name):
if sys.platform == 'win32':
program_name = '{0}.exe'.format(program_name)
filepath = os.path.split(program_name)[0]
if filepath:
if is_exe(program_name):
return program_name
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program_name)
if is_exe(exe_file):
return exe_file
raise Exception('Could not find {0} on the system path'.format(program_name))
def flatten(list_name, containers=(list, tuple)):
if isinstance(list_name, containers):
if len(list_name) < 1:
return []
else:
return reduce(lambda x, y : x + y, map(flatten, list_name))
else:
return [list_name]
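# Illustrative example (not part of the original module):
#     flatten([[1, 2], (3, [4])])
# returns [1, 2, 3, 4]; a non-container argument is wrapped, e.g. flatten(5) == [5].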
def fix_strings(file_list):
if sys.platform == 'win32':
file_list = [item.replace('\\', '/') for item in file_list]
return file_list
|
bsd-3-clause
| 7,232,347,731,415,690,000
| 28.58
| 82
| 0.615957
| false
| 3.633907
| false
| false
| false
|
Schibum/naclports
|
lib/naclports/pkg_info.py
|
1
|
3150
|
# Copyright 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shlex
import string
from naclports.error import PkgFormatError
VALID_KEYS = ['NAME', 'VERSION', 'URL', 'ARCHIVE_ROOT', 'LICENSE', 'DEPENDS',
'MIN_SDK_VERSION', 'LIBC', 'DISABLED_LIBC', 'ARCH', 'CONFLICTS',
'DISABLED_ARCH', 'URL_FILENAME', 'BUILD_OS', 'SHA1', 'DISABLED',
'DISABLED_TOOLCHAIN']
REQUIRED_KEYS = ['NAME', 'VERSION']
def ParsePkgInfo(contents, filename, valid_keys=None, required_keys=None):
"""Parse a string contains the contents of a pkg_info file.
Args:
contents: pkg_info contents as a string.
filename: name of file to use in error messages.
valid_keys: list of keys that are valid in the file.
required_keys: list of keys that are required in the file.
Returns:
A dictionary of the key, value pairs contained in the pkg_info file.
Raises:
PkgFormatError: if file is malformed, contains invalid keys, or does not
contain all required keys.
"""
rtn = {}
if valid_keys is None:
valid_keys = VALID_KEYS
if required_keys is None:
required_keys = REQUIRED_KEYS
def ParsePkgInfoLine(line, line_no):
if '=' not in line:
raise PkgFormatError('Invalid info line %s:%d' % (filename, line_no))
key, value = line.split('=', 1)
key = key.strip()
if key not in valid_keys:
raise PkgFormatError("Invalid key '%s' in info file %s:%d" % (key,
filename,
line_no))
value = value.strip()
if value[0] == '(':
if value[-1] != ')':
raise PkgFormatError('Error parsing %s:%d: %s (%s)' % (filename,
line_no,
key,
value))
value = value[1:-1].split()
else:
value = shlex.split(value)[0]
return (key, value)
def ExpandVars(value, substitutions):
if type(value) == str:
return string.Template(value).substitute(substitutions)
else:
return [string.Template(v).substitute(substitutions) for v in value]
for i, line in enumerate(contents.splitlines()):
if not line or line[0] == '#':
continue
key, raw_value = ParsePkgInfoLine(line, i+1)
if key in rtn:
raise PkgFormatError('Error parsing %s:%d: duplicate key (%s)' %
(filename, i+1, key))
rtn[key] = ExpandVars(raw_value, rtn)
for required_key in required_keys:
if required_key not in rtn:
raise PkgFormatError("Required key '%s' missing from info file: '%s'" %
(required_key, filename))
return rtn
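# Illustrative example (hypothetical package values): given the contents
#     NAME=zlib
#     VERSION=1.2.8
#     DEPENDS=(glibc-compat)
# ParsePkgInfo returns
#     {'NAME': 'zlib', 'VERSION': '1.2.8', 'DEPENDS': ['glibc-compat']}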
def ParsePkgInfoFile(filename, valid_keys=None, required_keys=None):
"""Parse pkg_info from a file on disk."""
with open(filename) as f:
return ParsePkgInfo(f.read(), filename, valid_keys, required_keys)
|
bsd-3-clause
| -8,466,547,247,953,835,000
| 35.627907
| 78
| 0.578413
| false
| 3.972257
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/operations/_operations.py
|
1
|
4791
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Azure Container Registry REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2017_10_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerRegistry/operations'} # type: ignore
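# Illustrative usage (assuming the management client exposes this operation group
# as `client.operations`, as generated mgmt clients typically do):
#     for operation in client.operations.list():
#         print(operation)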
|
mit
| 7,840,496,075,989,891,000
| 42.954128
| 133
| 0.643498
| false
| 4.633462
| false
| false
| false
|
romain-fontugne/RTTanalysis
|
dpgmm.py
|
1
|
8500
|
import numpy as np
import glob
import dpcluster as dpc
import pandas as pd
import os
import sys
try:
import matplotlib.pylab as plt
import matplotlib as mpl
except Exception, e:
sys.stderr.write("Matplotlib is not available!")
def loadData(filename, format="rttEstimate"):
"""Load a csv file in memory.
:returns: pandas DataFrame with the file data
"""
if format=="rttEstimate":
df = pd.read_csv(filename, sep=",", header=None, names=["ip", "peer", "rtt", "dstMac"])
elif format=="thomas":
# the filename is a directory containing several RTT measurements
# ..../ipSrc/ipDst/flowID/hour
data = []
for fi in glob.glob(filename):
tmp = pd.read_csv(fi, sep="\t", comment="s", header=None,
names=["rtt", "start_sec", "start_msec", "end_sec", "end_msec"],
usecols=["rtt","start_sec"])
val = fi.split("/")
tmp["ip"] = "{0}->{1}".format(val[-4], val[-3])
data.append(tmp)
df = pd.concat(data)
# The ip addresses become the index
df = df.set_index("ip")
return df
def clusterRTToverTime(rttEstimates, timeBin="60", outputDirectory="./rttDistributions/",
minEstimates=10, plot=True, logNormal=True):
"""For each IP address, find the different RTT distributions for each time
bin and plot the average value of each distribution.
"""
# for each IP in the traffic
ips = rttEstimates.index.unique()
for ip in ips:
start = rttEstimates[rttEstimates.index == ip].start_sec.min()
end = rttEstimates[rttEstimates.index == ip].start_sec.max()
dataIP = rttEstimates[rttEstimates.index == ip]
x = []
y = []
z = []
i = 0
for ts in range(start,end,timeBin):
if logNormal:
data = np.log10(dataIP[(dataIP.start_sec>=ts) & (dataIP.start_sec<ts+timeBin)].rtt)
else:
data = dataIP[(dataIP.start_sec>=ts) & (dataIP.start_sec<ts+timeBin)].rtt
# Look only at flows containing a certain number of RTT estimates
if len(data) < minEstimates:
sys.stderr("Ignoring data!! not enough samples!")
continue
# Cluster the data
vdp = dpgmm(data)
if vdp is None:
continue
params = NIWparam2Nparam(vdp)
if logNormal:
                mean, std = logNormalMeanStdDev(params[:, 0], params[:, 1])
            else:
                mean = params[:, 0]
                std = params[:, 1]
for mu, sig in zip(mean, std):
y.append(mu)
z.append(sig)
x.append(ts)
# Plot the clusters characteristics in a file
plt.figure()
plt.errorbar(x,y,yerr=z,fmt="o")
plt.grid(True)
if logNormal:
plt.savefig("{0}/{1}_timeBin{2}sec_logNormal.eps".format(outputDirectory, ip, timeBin))
else:
plt.savefig("{0}/{1}_timeBin{2}sec_normal.eps".format(outputDirectory, ip, timeBin))
def clusterRttPerIP(rttEstimates, outputDirectory="./rttDistributions/", minEstimates=10, plot=True, logNormal=False):
"""For each IP address, find the different RTT distributions and write
their mean and standard deviation in files.
"""
# for each IP in the traffic
ips = rttEstimates.index.unique()
for ip in ips:
if logNormal:
data = np.log10(rttEstimates[rttEstimates.index == ip].rtt)
else:
data = rttEstimates[rttEstimates.index == ip].rtt
# Look only at flows containing a certain number of RTT estimates
if len(data) < minEstimates:
continue
# Cluster the data
vdp = dpgmm(data)
if vdp is None:
continue
# Write the clusters characteristics in a file
fi = open("{0}/{1}.csv".format(outputDirectory, ip), "w")
params = NIWparam2Nparam(vdp)
if logNormal:
            mean, std = logNormalMeanStdDev(params[:, 0], params[:, 1])
        else:
            mean = params[:, 0]
            std = params[:, 1]
for mu, sig in zip(mean, std):
fi.write("{0},{1}\n".format(mu, sig))
if plot:
plotRttDistribution(rttEstimates, ip, "{0}/{1}.eps".format(outputDirectory, ip))
def NIWparam2Nparam(vdp, minClusterIPRatio=0.05):
"""
Convert Gaussian Normal-Inverse-Wishart parameters to the usual Gaussian
parameters (i.e. mean, standard deviation)
:vdp: Variational Dirichlet Process obtained from dpgmm
:minClusterIPRatio: Ignore distributions standing for a ratio of IPs lower
than minClusterIPRatio
"""
nbIPs = float(np.sum(vdp.cluster_sizes()))
mus, Sgs, k, nu = vdp.distr.prior.nat2usual(vdp.cluster_parameters()[
vdp.cluster_sizes() > (minClusterIPRatio * nbIPs), :])[0]
Sgs = Sgs / (k + 1 + 1)[:, np.newaxis, np.newaxis]
res = np.zeros( (len(mus), 2) )
for i, (mu, Sg) in enumerate(zip(mus, Sgs)):
w, V = np.linalg.eig(Sg)
V = np.array(np.matrix(V) * np.matrix(np.diag(np.sqrt(w))))
V = V[0]
res[i] = (mu[0], V[0])
return res
def logNormalMeanStdDev(loc, scale):
"""Compute the mean and standard deviation from the location and scale
parameter of a lognormal distribution.
:loc: location parameter of a lognormal distribution
    :scale: scale parameter of a lognormal distribution
:return: (mean,stdDev) the mean and standard deviation of the distribution
"""
mu = 10 ** (loc + ((scale ** 2) / 2.0))
var = (10 ** (scale ** 2) -1) * 10 ** (2 * loc + scale ** 2)
return mu, np.sqrt(var)
def dpgmm(data, priorWeight=0.1, maxClusters=32, thresh=1e-3, maxIter=10000):
"""
Compute the Variational Inference for Dirichlet Process Mixtures
on the given data.
:data: 1D array containing the data to cluster
:priorWeight: likelihood-prior distribution pair governing clusters.
:maxClusters: Maximum number of clusters
    :thresh: convergence threshold for the variational inference
    :maxIter: maximum number of iterations
"""
data = np.array(data).reshape(-1, 1)
vdp = dpc.VDP(dpc.distributions.GaussianNIW(1), w=priorWeight, k=maxClusters, tol=thresh, max_iters=maxIter)
stats = vdp.distr.sufficient_stats(data)
vdp.batch_learn(stats)
return vdp
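# Illustrative usage (a minimal sketch mirroring clusterRttPerIP above):
#     vdp = dpgmm(rtt_samples)            # rtt_samples: 1D array of RTT estimates
#     params = NIWparam2Nparam(vdp)       # one (mean, std dev) row per retained cluster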
def plotRttDistribution(rttEstimates, ip, filename, nbBins=500, logscale=False):
"""Plot the RTT distribution of an IP address
:rttEstimates: pandas DataFrame containing the RTT estimations
:ip: IP address to plot
:filename: Filename for the plot
:nbBins: Number of bins in the histogram
:logscale: Plot RTTs in logscale if set to True
:returns: None
"""
if logscale:
data = np.log10(rttEstimates[rttEstimates.index == ip].rtt)
else:
data = rttEstimates[rttEstimates.index == ip].rtt
    h, b = np.histogram(data, nbBins, normed=True)
plt.figure(1, figsize=(9, 3))
plt.clf()
ax = plt.subplot()
x = b[:-1]
ax.plot(x, h, "k")
ax.grid(True)
plt.title("%s (%s RTTs)" % (ip, len(data)))
if logscale:
plt.xlabel("log10(RTT)")
else:
plt.xlabel("RTT")
plt.ylabel("pdf")
minorLocator = mpl.ticker.MultipleLocator(10)
ax.xaxis.set_minor_locator(minorLocator)
plt.tight_layout()
plt.savefig(filename)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("usage: python {0} rtt.csv [outputDirectory]".format(sys.argv[0]))
        sys.exit(1)
    filename = sys.argv[1]
    if len(sys.argv) > 2:
        outputDirectory = sys.argv[2]
    else:
        outputDirectory = "./rttDistributions/"
# Create the output directory if it doesn't exist
if not os.path.exists(outputDirectory):
os.mkdir(outputDirectory)
if filename.endswith(".csv"):
# Get RTT data from given file
rtt = loadData(filename, format="rttEstimate")
# Sample RTT estimates: samplingRate=0.1 means that 10% of the
# estimates will be used
samplingRate = 0.1
if samplingRate:
rtt = rtt.sample(frac=samplingRate)
# Find RTT distributions for each IP address
clusterRttPerIP(rtt, outputDirectory, logNormal=False)
else:
# Get RTT data from given file
rtt = loadData(filename, format="thomas")
# Find RTT distributions over time
clusterRTToverTime(rtt, 600, outputDirectory, logNormal=False)
#clusterRttPerIP(rtt, outputDirectory)
|
gpl-2.0
| -7,810,559,939,886,170,000
| 31.319392
| 118
| 0.601765
| false
| 3.521127
| true
| false
| false
|
WuShichao/computational-physics
|
2/figure_2_2/figure_2_2.py
|
1
|
1143
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 15:52:36 2016
Source program for Figure 2.2 (textbook p. 23)
@author: nightwing
"""
import matplotlib.pyplot as plt
DENSITY = 1.29     # air density (kg/m3)
C = 1.0            # drag coefficient
A = 0.33           # frontal area (m2)
M = 70.0           # mass of rider plus bicycle (kg)
v1 = 4.0           # velocity without air resistance (m/s)
v2 = 4.0           # velocity with air resistance (m/s)
P = 400.0          # power (W)
t = 0              # initial time
t_max = 200        # end time (s)
dt = 0.1           # time step (s)
time = []          # stores the time values
velocity1 = []     # stores velocities without air resistance
velocity2 = []     # stores velocities with air resistance
# --- Euler method: compute the bicycle's velocity ---
while t <= t_max:
velocity1.append(v1)
velocity2.append(v2)
time.append(t)
v1 += P/(M*v1)*dt
v2 += P/(M*v2)*dt-C*DENSITY*A*v2**2/(2*M)*dt
t += dt
# ------------ plotting ---------------
plt.title("Bicycling simulation: velocity vs. time")
plt.xlabel("time (s)")
plt.ylabel("velocity (m/s)")
plt.plot(time, velocity1, "k-", label="No air resistance")
plt.plot(time, velocity2, "k--", label="With air resistance")
plt.legend(loc=2)
plt.show()
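# Added for illustration (not part of the original figure script): in the
# drag-free case M*v*dv/dt = P integrates to v(t) = sqrt(v0**2 + 2*P*t/M),
# which gives a quick accuracy check of the Euler result above.
v_exact_final = (4.0 ** 2 + 2 * P * time[-1] / M) ** 0.5
print("Euler (no drag) final velocity: %.3f m/s, exact: %.3f m/s" % (velocity1[-1], v_exact_final))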
|
gpl-3.0
| -5,247,181,144,916,056,000
| 22.8
| 59
| 0.560463
| false
| 1.828846
| false
| false
| false
|
owen-chen/wireless-testing-platform
|
wtp/DeviceUtils.py
|
1
|
3223
|
# -*- coding: utf-8 -*-
'''
Created on May 21, 2015
@author: chenchen
'''
import os
from CommonLib import callCommand
class DeviceUtils:
processlock = '/sdcard/processlock.pid'
""" 根据手机序列号获取手机产品型号 """
@staticmethod
def getProductBySerial(serial):
return callCommand("adb -s %s shell getprop ro.product.model" % serial)[0].strip()
""" 获取手机分辨率 """
@staticmethod
def getResolutionBySerial(serial):
resolution_cmd = 'adb -s %s shell dumpsys display | grep DisplayDeviceInfo' % serial
rlt = callCommand(resolution_cmd)[0].strip()
return rlt[rlt.find(':') + 1:rlt.find('}')].split(',')[0].strip()
""" 获取手机安卓版本信息 """
@staticmethod
def getEditionBySerial(serial):
return callCommand('adb -s %s shell getprop ro.build.version.release' % serial)[0].strip()
""" 获取手机内存信息,返回内存大小和可用内存大小 """
@staticmethod
def getMemoryParameterBySerial(serial):
memory_result = callCommand('adb -s %s shell df | grep data' % serial)[0].strip().split()
return memory_result[1], memory_result[3]
""" 判断手机是否插入sim卡,主要根据imsi号进行判断 """
@staticmethod
def getSimStateBySerial(serial):
service_state = callCommand('adb -s %s shell dumpsys telephony.registry | grep mServiceState' % serial)[0].strip().split()[0].split('=')[1]
        return int(service_state) == 1
""" 将手机中的文件保存至电脑中 """
@staticmethod
def pullFileFromDevice(serial, source, target):
callCommand('adb -s %s pull %s %s' % (serial, source, target))
""" 将源文件拷贝至指定手机上的目标路径下 """
@staticmethod
def pushFileToTargetPath(serial, source, target):
callCommand('adb -s %s push %s %s' % (serial, source, target))
""" 创建文件到指定手机上的目标路径下 """
@staticmethod
def lockDevice(serial):
callCommand('adb -s %s shell touch %s' % (serial, DeviceUtils.processlock))
""" 在指定手机上的目标路径下删除文件 """
@staticmethod
def unlockDevice(serial):
callCommand('adb -s %s shell rm %s' % (serial, DeviceUtils.processlock))
""" 判断指定手机上的目标路径的指定文件是否存在 """
@staticmethod
def isDeviceLocked(serial):
processlock = DeviceUtils.processlock
return callCommand('adb -s %s shell ls %s | grep %s' % (serial, processlock[0:processlock.rindex('/') + 1], processlock[processlock.rindex('/') + 1:]))
""" 将本地文件夹传入手机中对应的文件夹,且按照本地文件夹的结构传入新文件夹 """
@staticmethod
def pushFolderToDevice(serial, source, target):
file_list = os.listdir(source)
for sub_file in file_list:
local_file = os.path.join(source, sub_file)
if os.path.isfile(local_file):
DeviceUtils.pushFileToTargetPath(serial, local_file, target + '/' + sub_file)
else:
DeviceUtils.pushFolderToDevice(serial, local_file, target + '/' + sub_file)
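# Illustrative usage (added; not part of the original module). "SERIAL" is a
# placeholder for a connected device's adb serial number:
#   model = DeviceUtils.getProductBySerial("SERIAL")
#   total, free = DeviceUtils.getMemoryParameterBySerial("SERIAL")
#   DeviceUtils.pushFolderToDevice("SERIAL", "./testdata", "/sdcard/testdata")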
|
gpl-2.0
| -2,518,703,167,617,819,000
| 34.949367
| 159
| 0.63156
| false
| 2.896939
| false
| false
| false
|
antoinedube/django-spine-news-display
|
Server/RadioCanada/infrastructure.py
|
1
|
1266
|
from django.utils import timezone
from urllib.request import urlopen
import xml.etree.ElementTree as et
import json
from django.db import IntegrityError
from RadioCanada.models import NewsItem
class Downloader:
def __init__(self):
pass
def fetch(self):
f = urlopen('http://rss.radio-canada.ca/fils/nouvelles/nouvelles.xml')
page_content = f.read().decode("utf-8")
return self.parse_xml(page_content)
def parse_xml(self,xml_string):
tree = et.fromstring(xml_string)
elements = []
for child in tree.iter('item'):
fetch_time = timezone.now()
title = child.find('title').text
link = child.find('link').text
description = child.find('description').text
image_link = child.find('enclosure').attrib['url']
news_item = dict({
'fetch_time': fetch_time,
'title': title,
'link': link,
'image_link': image_link,
'description': description
})
new_element = NewsItem(**news_item)
elements.append(new_element)
try:
new_element.save()
except IntegrityError as e:
print(e)
return elements
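# Illustrative usage (added; not part of the original module):
#   items = Downloader().fetch()  # downloads the RSS feed, saves NewsItem rows,
#                                 # and returns the parsed NewsItem instances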
|
gpl-3.0
| 1,697,606,006,735,934,000
| 26.521739
| 78
| 0.548973
| false
| 4.234114
| false
| false
| false
|
weltliteratur/vossanto
|
theof/graph.py
|
1
|
3086
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Reads source-modifier pairs and outputs a graphviz .dot file.
#
# Usage: python3 graph.py pairs.tsv > graph.dot  (tab-separated source/modifier pairs; stdin also works)
#
# Author: rja
#
# Changes:
# 2018-08-17 (rja)
# - initial version
import fileinput
import math
import random
from collections import Counter
def filter_graph(sources, modifiers, edges):
filtered_sources = dict()
filtered_modifiers = dict()
filtered_edges = dict()
for source, modifier in edges:
# filter edges
if sources[source] > 14 and modifiers[modifier] > 0:
filtered_edges[(source, modifier)] = edges[(source, modifier)]
filtered_sources[source] = sources[source]
filtered_modifiers[modifier] = modifiers[modifier]
return filtered_sources, filtered_modifiers, filtered_edges
def build_graph(input):
sources = Counter()
modifiers = Counter()
edges = Counter()
for line in input:
parts = line.strip().split('\t')
if len(parts) == 2:
source, modifier = parts
# count
sources[source] += 1
modifiers[modifier] += 1
# add edge
edges[(source, modifier)] += 1
return sources, modifiers, edges
def escape_label(s):
return s.replace('"', '\\"')
# see https://stackoverflow.com/questions/28999287/generate-random-colors-rgb/28999469
def random_color():
levels = range(128,256,16)
return "#" + "".join(["{:02x}".format(random.choice(levels)) for _ in range(3)])
def print_graph(sources, modifiers, edges):
print("digraph D {")
# config
print(' graph [outputorder="edgesfirst",overlap=false,sep="+10,10"];')
print(' node [fontname="Arial",style=filled];')
# vertices
vertices = dict()
vid = 0
colormap = dict()
for source in sources:
vid += 1
# store vertex
vertices["s_" + source] = vid
# store color
color = random_color()
colormap[vid] = color
# attributes
weight = sources[source]
fs = max(weight, 10)
print(vid, '[label="' + escape_label(source) + '",width=' + str(math.log(weight) + 1) + ',fontsize=' + str(fs) + ',color="' + color + '"];')
for modifier in modifiers:
vid += 1
vertices["m_" + modifier] = vid
weight = modifiers[modifier]
fs = max(weight, 10)
print(vid, '[label="' + escape_label(modifier) + '", color="yellow", width=' + str(math.log(weight) + 1) + ',fontsize=' + str(fs) + '];')
# edges
for source, modifier in edges:
sid = vertices["s_" + source]
mid = vertices["m_" + modifier]
weight = edges[(source, modifier)]
print(sid, "->", mid, '[weight=' + str(weight) + ',penwidth=' + str(weight + 1) + ',color="' + colormap[sid] + '"];')
print("}")
if __name__ == '__main__':
# build graph
sources, modifiers, edges = build_graph(fileinput.input())
# filter graph
sources, modifiers, edges = filter_graph(sources, modifiers, edges)
# print graph
print_graph(sources, modifiers, edges)
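    # Illustrative follow-up (added; not part of the original script): the .dot
    # output can be rendered with Graphviz, e.g.
    #   python3 graph.py pairs.tsv > graph.dot
    #   neato -Tpdf graph.dot -o graph.pdf
    # (neato/fdp honour the overlap/sep attributes set in print_graph).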
|
gpl-3.0
| 2,003,152,647,947,842,300
| 29.254902
| 148
| 0.579391
| false
| 3.772616
| false
| false
| false
|
dmitriy0611/django
|
django/db/migrations/executor.py
|
1
|
10302
|
from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
if plan is None:
plan = self.migration_plan(targets)
migrations_to_run = {m[0] for m in plan}
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
# Holds all states right before a migration is applied
# if the migration is being run.
states = {}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if self.progress_callback:
self.progress_callback("render_start")
# Phase 1 -- Store all project states of migrations right before they
# are applied. The first migration that will be applied in phase 2 will
# trigger the rendering of the initial project state. From this time on
# models will be recursively reloaded as explained in
# `django.db.migrations.state.get_related_models_recursive()`.
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration whose state was already computed
# from the set below (`migrations_to_run.remove(migration)`).
# If no states for migrations must be computed, we can exit
# this loop. Migrations that occur after the latest migration
# that is about to be applied would only trigger unneeded
# mutate_state() calls.
break
do_run = migration in migrations_to_run
if do_run:
if 'apps' not in state.__dict__:
state.apps # Render all real_apps -- performance critical
states[migration] = state.clone()
migrations_to_run.remove(migration)
# Only preserve the state if the migration is being run later
state = migration.mutate_state(state, preserve=do_run)
if self.progress_callback:
self.progress_callback("render_success")
# Phase 2 -- Run the migrations
for migration, backwards in plan:
if not backwards:
self.apply_migration(states[migration], migration, fake=fake, fake_initial=fake_initial)
else:
self.unapply_migration(states[migration], migration, fake=fake)
self.check_replacements()
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor() as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel).
"""
# Bail if the migration isn't the first one in its app
if [name for app, name in migration.dependencies if app == migration.app_label]:
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_migration = False
# Make sure all create model are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_migration = True
# If we get this far and we found at least one CreateModel migration,
# the migration is considered implicitly applied.
return found_create_migration, after_state
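# Illustrative usage (added; not part of Django), inside a configured project:
#   from django.db import connections
#   executor = MigrationExecutor(connections['default'])
#   targets = executor.loader.graph.leaf_nodes()
#   plan = executor.migration_plan(targets)
#   executor.migrate(targets, plan=plan)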
|
bsd-3-clause
| 917,353,421,160,323,000
| 46.256881
| 115
| 0.599981
| false
| 4.767237
| false
| false
| false
|
psychopy/versions
|
psychopy/core.py
|
1
|
5931
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Basic functions, including timing, rush (imported), quit
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import absolute_import, division, print_function
from builtins import object
import sys
import threading
import subprocess
import shlex
import locale
# some things are imported just to be accessible within core's namespace
from psychopy.clock import (MonotonicClock, Clock, CountdownTimer,
wait, monotonicClock, getAbsTime,
StaticPeriod) # pylint: disable=W0611
# always safe to call rush, even if its not going to do anything for a
# particular OS
from psychopy.platform_specific import rush # pylint: disable=W0611
from psychopy import logging
from psychopy.constants import STARTED, NOT_STARTED, FINISHED, PY3
try:
import pyglet
havePyglet = True
# may not want to check, to preserve terminal window focus
checkPygletDuringWait = True
except ImportError:
havePyglet = False
checkPygletDuringWait = False
try:
import glfw
haveGLFW = True
except ImportError:
haveGLFW = False
runningThreads = [] # just for backwards compatibility?
openWindows = [] # visual.Window updates this, event.py and clock.py use it
# Set getTime in core to == the monotonicClock instance created in the
# clockModule.
# The logging module sets the defaultClock to == clock.monotonicClock,
# so by default the core.getTime() and logging.defaultClock.getTime()
# functions return the 'same' timebase.
#
# This way 'all' OSs have a core.getTime() timebase that starts at 0.0 when
# the experiment is launched, instead of it being this way on Windows only
# (which was also a descripancy between OS's when win32 was using time.clock).
def getTime(applyZero=True):
"""Get the current time since psychopy.core was loaded.
Version Notes: Note that prior to PsychoPy 1.77.00 the behaviour of
getTime() was platform dependent (on OSX and linux it was equivalent to
:func:`psychopy.core.getAbsTime`
whereas on windows it returned time since loading of the module, as now)
"""
return monotonicClock.getTime(applyZero)
def quit():
"""Close everything and exit nicely (ending the experiment)
"""
# pygame.quit() # safe even if pygame was never initialised
logging.flush()
for thisThread in threading.enumerate():
if hasattr(thisThread, 'stop') and hasattr(thisThread, 'running'):
# this is one of our event threads - kill it and wait for success
thisThread.stop()
while thisThread.running == 0:
pass # wait until it has properly finished polling
sys.exit(0) # quits the python session entirely
def shellCall(shellCmd, stdin='', stderr=False, env=None, encoding=None):
"""Call a single system command with arguments, return its stdout.
Returns stdout, stderr if stderr is True.
Handles simple pipes, passing stdin to shellCmd (pipes are untested
on windows) can accept string or list as the first argument
Parameters
----------
shellCmd : str, or iterable
The command to execute, and its respective arguments.
stdin : str, or None
Input to pass to the command.
stderr : bool
Whether to return the standard error output once execution is finished.
env : dict
The environment variables to set during execution.
encoding : str
The encoding to use for communication with the executed command.
This argument will be ignored on Python 2.7.
Notes
-----
We use ``subprocess.Popen`` to execute the command and establish
`stdin` and `stdout` pipes.
Python 2.7 always opens the pipes in text mode; however,
Python 3 defaults to binary mode, unless an encoding is specified.
To unify pipe communication across Python 2 and 3, we now provide an
`encoding` parameter, enforcing `utf-8` text mode by default.
This parameter is present from Python 3.6 onwards; using an older
Python 3 version will raise an exception. The parameter will be ignored
when running Python 2.7.
"""
if encoding is None:
encoding = locale.getpreferredencoding()
if type(shellCmd) == str:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd)
elif type(shellCmd) == bytes:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd.decode('utf-8'))
elif type(shellCmd) in (list, tuple): # handles whitespace in filenames
shellCmdList = shellCmd
else:
msg = 'shellCmd requires a string or iterable.'
raise TypeError(msg)
cmdObjects = []
for obj in shellCmdList:
if type(obj) != bytes:
cmdObjects.append(obj)
else:
cmdObjects.append(obj.decode('utf-8'))
# Since Python 3.6, we can use the `encoding` parameter.
if PY3:
if sys.version_info.minor >= 6:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding=encoding, env=env)
else:
msg = 'shellCall() requires Python 2.7, or 3.6 and newer.'
raise RuntimeError(msg)
else:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
stdoutData, stderrData = proc.communicate(stdin)
del proc
if stderr:
return stdoutData.strip(), stderrData.strip()
else:
return stdoutData.strip()
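# Illustrative usage (added; not part of PsychoPy):
#   out = shellCall('echo hello')                    # -> 'hello'
#   out, err = shellCall(['ls', '-l'], stderr=True)  # capture stderr as well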
|
gpl-3.0
| 8,450,671,426,704,044,000
| 34.303571
| 79
| 0.669364
| false
| 4.206383
| false
| false
| false
|
ianblenke/awsebcli
|
ebcli/bundled/botocore/utils.py
|
1
|
18227
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import datetime
import hashlib
import math
import binascii
from six import string_types, text_type
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.compat import json, quote, zip_longest
from botocore.vendored import requests
from botocore.compat import OrderedDict
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_SECURITY_CREDENTIALS_URL = (
'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def remove_dot_segments(url):
# RFC 2986, section 5.2.4 "Remove Dot Segments"
output = []
while url:
if url.startswith('../'):
url = url[3:]
elif url.startswith('./'):
url = url[2:]
elif url.startswith('/./'):
url = '/' + url[3:]
elif url.startswith('/../'):
url = '/' + url[4:]
if output:
output.pop()
elif url.startswith('/..'):
url = '/' + url[3:]
if output:
output.pop()
elif url.startswith('/.'):
url = '/' + url[2:]
elif url == '.' or url == '..':
url = ''
elif url.startswith('//'):
# As far as I can tell, this is not in the RFC,
# but AWS auth services require consecutive
# slashes are removed.
url = url[1:]
else:
if url[0] == '/':
next_slash = url.find('/', 1)
else:
next_slash = url.find('/', 0)
if next_slash == -1:
output.append(url)
url = ''
else:
output.append(url[:next_slash])
url = url[next_slash:]
return ''.join(output)
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a value
# based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
        if current_key not in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key with
# an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
class InstanceMetadataFetcher(object):
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL):
self._timeout = timeout
self._num_attempts = num_attempts
self._url = url
def _get_request(self, url, timeout, num_attempts=1):
for i in range(num_attempts):
try:
response = requests.get(url, timeout=timeout)
except (requests.Timeout, requests.ConnectionError) as e:
logger.debug("Caught exception while trying to retrieve "
"credentials: %s", e, exc_info=True)
else:
if response.status_code == 200:
return response
raise _RetriesExceededError()
def retrieve_iam_role_credentials(self):
data = {}
url = self._url
timeout = self._timeout
num_attempts = self._num_attempts
try:
r = self._get_request(url, timeout, num_attempts)
if r.content:
fields = r.content.decode('utf-8').split('\n')
for field in fields:
if field.endswith('/'):
data[field[0:-1]] = self.retrieve_iam_role_credentials(
url + field, timeout, num_attempts)
else:
val = self._get_request(
url + field,
timeout=timeout,
num_attempts=num_attempts).content.decode('utf-8')
if val[0] == '{':
val = json.loads(val)
data[field] = val
else:
logger.debug("Metadata service returned non 200 status code "
"of %s for url: %s, content body: %s",
r.status_code, url, r.content)
except _RetriesExceededError:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
num_attempts)
# We sort for stable ordering. In practice, this should only consist
# of one role, but may need revisiting if this expands in the future.
final_data = {}
for role_name in sorted(data):
final_data = {
'role_name': role_name,
'access_key': data[role_name]['AccessKeyId'],
'secret_key': data[role_name]['SecretAccessKey'],
'token': data[role_name]['Token'],
'expiry_time': data[role_name]['Expiration'],
}
return final_data
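# Illustrative usage (added; not part of botocore), on an EC2 instance with an
# instance profile attached:
#   fetcher = InstanceMetadataFetcher(timeout=1, num_attempts=2)
#   creds = fetcher.retrieve_iam_role_credentials()  # {} if the service is unreachable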
def merge_dicts(dict1, dict2):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
else:
# At scalar types, we iterate and merge the
# current dict that we're on.
dict1[key] = dict2[key]
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError as e:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
"""
if not isinstance(input_str, string_types):
input_str = text_type(input_str)
return quote(text_type(input_str).encode('utf-8'), safe=safe)
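# Illustrative examples (added; not part of botocore):
#   percent_encode('a b/c')                -> 'a%20b%2Fc'
#   percent_encode_sequence({'k': 'a b'})  -> 'k=a%20b'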
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzlocal())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzlocal())
except (TypeError, ValueError):
pass
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
# I think a case would be made that if no time zone is provided,
# we should use the local time. However, to restore backwards
# compat, the previous behavior was to assume UTC, which is
# what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
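# Illustrative check (added; not part of botocore):
#   import io
#   calculate_sha256(io.BytesIO(b'foo'), as_hex=True)  # hex sha256 of b'foo'
#   calculate_tree_hash(io.BytesIO(b'foo'))  # matches the plain sha256 for bodies within one 1 MB chunk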
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input shape.
The specific values used are place holder values. For strings an
empty string is used, for numbers 0 or 0.0 is used. The intended
usage of this class is to generate the *shape* of the input structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self):
pass
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name == 'float':
return 0.0
elif shape.type_name == 'boolean':
return True
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(member_shape,
stack)
return skeleton
def _generate_type_list(self, shape, stack):
# For list elements we've arbitrarily decided to
# return two elements for the skeleton list.
return [
self._generate_skeleton(shape.member, stack),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
|
apache-2.0
| 4,503,787,993,518,301,700
| 33.390566
| 82
| 0.600208
| false
| 4.281654
| false
| false
| false
|
jepler/linuxcnc-mirror
|
tests/tool-info/random-no-startup-tool/test-ui.py
|
1
|
8177
|
#!/usr/bin/env python
import linuxcnc
import hal
import math
import time
import sys
import subprocess
import os
import signal
import glob
import re
def wait_for_linuxcnc_startup(status, timeout=10.0):
"""Poll the Status buffer waiting for it to look initialized,
rather than just allocated (all-zero). Returns on success, throws
RuntimeError on failure."""
start_time = time.time()
while time.time() - start_time < timeout:
status.poll()
if (status.angular_units == 0.0) \
or (status.axes == 0) \
or (status.axis_mask == 0) \
or (status.cycle_time == 0.0) \
or (status.exec_state != linuxcnc.EXEC_DONE) \
or (status.interp_state != linuxcnc.INTERP_IDLE) \
or (status.inpos == False) \
or (status.linear_units == 0.0) \
or (status.max_acceleration == 0.0) \
or (status.max_velocity == 0.0) \
or (status.program_units == 0.0) \
or (status.rapidrate == 0.0) \
or (status.state != linuxcnc.STATE_ESTOP) \
or (status.task_state != linuxcnc.STATE_ESTOP):
time.sleep(0.1)
else:
# looks good
return
# timeout, throw an exception
raise RuntimeError
def verify_interp_vars(state, current_tool, current_pocket, selected_tool, selected_pocket):
c.mdi('(debug,current_tool=#<_current_tool> current_pocket=#<_current_pocket> selected_tool=#<_selected_tool> selected_pocket=#<_selected_pocket>)')
c.wait_complete()
expected = "current_tool=%.6f current_pocket=%.6f selected_tool=%.6f selected_pocket=%.6f" % (current_tool, current_pocket, selected_tool, selected_pocket)
while True:
result = e.poll()
if result == None:
print "nothing from polling error channel"
sys.exit(1)
(type, msg) = result
if type == linuxcnc.OPERATOR_DISPLAY:
if msg == expected:
# success!
break
print "state='%s', unexpected interp variables" % state
print "result:", msg
print "expected:", expected
sys.exit(1)
else:
print "state='%s', ignoring unexpected error type %d: %s" % (state, type, msg)
print "state='%s', got expected interp variables:" % state
print " current_tool=%.6f" % current_tool
print " current_pocket=%.6f" % current_pocket
print " selected_tool=%.6f" % selected_tool
print " selected_pocket=%.6f" % selected_pocket
def verify_io_pins(state, tool_number, tool_prep_number, tool_prep_pocket):
if h['tool-number'] != tool_number:
print "state=%s, expected io.tool-number=%d, got %d" % (state, tool_number, h['tool-number'])
sys.exit(1)
if h['tool-prep-number'] != tool_prep_number:
print "state=%s, expected io.tool-prep-number=%d, got %d" % (state, tool_prep_number, h['tool-prep-number'])
sys.exit(1)
if h['tool-prep-pocket'] != tool_prep_pocket:
print "state=%s, expected io.tool-prep-pocket=%d, got %d" % (state, tool_prep_pocket, h['tool-prep-pocket'])
sys.exit(1)
print "state='%s', got expected io pins:" % state
print " tool-number=%d" % tool_number
print " tool-prep-number=%d" % tool_prep_number
print " tool-prep-pocket=%d" % tool_prep_pocket
def verify_status_buffer(state, tool_in_spindle):
s.poll()
if s.tool_in_spindle != tool_in_spindle:
print "state=%s, expected status.tool_in_spindle=%d, got %d" % (state, tool_in_spindle, s.tool_in_spindle)
sys.exit(1)
print "state='%s', got expected status buffer fields:" % state
print " tool_in_spindle=%d" % tool_in_spindle
def wait_for_hal_pin(pin_name, value, timeout=10):
start = time.time()
while time.time() < (start + timeout):
if h[pin_name] == value:
return
time.sleep(0.1)
print "timeout waiting for hal pin %s to go to %s!" % (pin_name, value)
sys.exit(1)
c = linuxcnc.command()
s = linuxcnc.stat()
e = linuxcnc.error_channel()
h = hal.component("test-ui")
h.newpin("tool-number", hal.HAL_S32, hal.HAL_IN)
h.newpin("tool-prep-number", hal.HAL_S32, hal.HAL_IN)
h.newpin("tool-prep-pocket", hal.HAL_S32, hal.HAL_IN)
h.newpin("tool-prepare", hal.HAL_BIT, hal.HAL_IN)
h.newpin("tool-prepared", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("tool-change", hal.HAL_BIT, hal.HAL_IN)
h.newpin("tool-changed", hal.HAL_BIT, hal.HAL_OUT)
h['tool-prepared'] = False
h['tool-changed'] = False
h.ready()
hal.connect('test-ui.tool-number', 'tool-number')
hal.connect('test-ui.tool-prep-number', 'tool-prep-number')
hal.connect('test-ui.tool-prep-pocket', 'tool-prep-pocket')
hal.connect('test-ui.tool-prepare', 'tool-prepare')
hal.connect('test-ui.tool-prepared', 'tool-prepared')
hal.connect('test-ui.tool-change', 'tool-change')
hal.connect('test-ui.tool-changed', 'tool-changed')
# Wait for LinuxCNC to initialize itself so the Status buffer stabilizes.
wait_for_linuxcnc_startup(s)
c.state(linuxcnc.STATE_ESTOP_RESET)
c.state(linuxcnc.STATE_ON)
c.home(-1)
c.wait_complete()
c.mode(linuxcnc.MODE_MDI)
#
# Starting state should be sane.
#
verify_status_buffer(state='init', tool_in_spindle=-1)
verify_io_pins(state='init', tool_number=-1, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='init', current_tool=-1, current_pocket=2, selected_tool=0, selected_pocket=-1)
#
# After "T1" prepares the tool.
#
c.mdi('T1')
wait_for_hal_pin('tool-prepare', True)
h['tool-prepared'] = True
wait_for_hal_pin('tool-prepare', False)
h['tool-prepared'] = False
verify_status_buffer(state='after T1', tool_in_spindle=-1)
verify_io_pins(state='after T1', tool_number=-1, tool_prep_number=1, tool_prep_pocket=1)
verify_interp_vars(state='after T1', current_tool=-1, current_pocket=2, selected_tool=1, selected_pocket=1)
#
# After "M6" changes to the prepared tool.
#
c.mdi('M6')
wait_for_hal_pin('tool-change', True)
h['tool-changed'] = True
wait_for_hal_pin('tool-change', False)
h['tool-changed'] = False
verify_status_buffer(state='after T1 M6', tool_in_spindle=1)
verify_io_pins(state='after T1 M6', tool_number=1, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='after T1 M6', current_tool=1, current_pocket=0, selected_tool=1, selected_pocket=-1)
#
# After "T10" prepares the tool.
#
c.mdi('T10')
wait_for_hal_pin('tool-prepare', True)
h['tool-prepared'] = True
wait_for_hal_pin('tool-prepare', False)
h['tool-prepared'] = False
verify_status_buffer(state='after T10', tool_in_spindle=1)
verify_io_pins(state='after T10', tool_number=1, tool_prep_number=10, tool_prep_pocket=3)
verify_interp_vars(state='after T10', current_tool=1, current_pocket=0, selected_tool=10, selected_pocket=3)
#
# After "M6" changes to the prepared tool.
#
c.mdi('M6')
wait_for_hal_pin('tool-change', True)
h['tool-changed'] = True
wait_for_hal_pin('tool-change', False)
h['tool-changed'] = False
verify_status_buffer(state='after T10 M6', tool_in_spindle=10)
verify_io_pins(state='after T10 M6', tool_number=10, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='after T10 M6', current_tool=10, current_pocket=0, selected_tool=10, selected_pocket=-1)
#
# After "T99999" prepares a tool.
#
c.mdi('T99999')
wait_for_hal_pin('tool-prepare', True)
h['tool-prepared'] = True
wait_for_hal_pin('tool-prepare', False)
h['tool-prepared'] = False
verify_status_buffer(state='after T99999', tool_in_spindle=10)
verify_io_pins(state='after T99999', tool_number=10, tool_prep_number=99999, tool_prep_pocket=50)
verify_interp_vars(state='after T99999', current_tool=10, current_pocket=0, selected_tool=99999, selected_pocket=50)
#
# After "M6" changes to the prepared tool.
#
c.mdi('M6')
wait_for_hal_pin('tool-change', True)
h['tool-changed'] = True
wait_for_hal_pin('tool-change', False)
h['tool-changed'] = False
verify_status_buffer(state='after T99999 M6', tool_in_spindle=99999)
verify_io_pins(state='after T99999 M6', tool_number=99999, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='after T99999 M6', current_tool=99999, current_pocket=0, selected_tool=99999, selected_pocket=-1)
sys.exit(0)
|
lgpl-2.1
| -7,770,383,162,234,791,000
| 30.817121
| 159
| 0.660756
| false
| 2.841209
| true
| false
| false
|
fluo-io/fluo-deploy
|
lib/tests/ec2/test_config.py
|
1
|
6124
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from muchos.config import Ec2DeployConfig
def test_ec2_cluster():
c = Ec2DeployConfig(
"muchos",
"../conf/muchos.props.example",
"../conf/hosts/example/example_cluster",
"../conf/checksums",
"../conf/templates",
"mycluster",
)
assert c.checksum_ver("accumulo", "1.9.0") == (
"sha256:"
"f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe"
)
assert c.checksum("accumulo") == (
"sha256:"
"df172111698c7a73aa031de09bd5589263a6b824482fbb9b4f0440a16602ed47"
)
assert c.get("ec2", "default_instance_type") == "m5d.large"
assert c.get("ec2", "worker_instance_type") == "m5d.large"
assert c.get("ec2", "aws_ami") == "ami-9887c6e7"
assert c.user_home() == "/home/centos"
assert c.max_ephemeral() == 1
assert c.mount_root() == "/media/ephemeral"
assert c.fstype() == "ext3"
assert c.force_format() == "no"
assert c.worker_data_dirs() == ["/media/ephemeral0"]
assert c.default_data_dirs() == ["/media/ephemeral0"]
assert c.metrics_drive_ids() == ["media-ephemeral0"]
assert c.shutdown_delay_minutes() == "0"
assert c.mounts(2) == ["/media/ephemeral0", "/media/ephemeral1"]
assert c.node_type_map() == {
"default": {
"mounts": ["/media/ephemeral0"],
"devices": ["/dev/nvme1n1"],
},
"worker": {
"mounts": ["/media/ephemeral0"],
"devices": ["/dev/nvme1n1"],
},
}
assert c.node_type("worker1") == "worker"
assert c.node_type("leader1") == "default"
assert not c.has_option("ec2", "vpc_id")
assert not c.has_option("ec2", "subnet_id")
assert c.get("ec2", "key_name") == "my_aws_key"
assert c.instance_tags() == {}
assert len(c.nodes()) == 6
assert c.get_node("leader1") == [
"namenode",
"resourcemanager",
"accumulomaster",
"zookeeper",
]
assert c.get_node("leader2") == ["metrics"]
assert c.get_node("worker1") == ["worker", "swarmmanager"]
assert c.get_node("worker2") == ["worker"]
assert c.get_node("worker3") == ["worker"]
assert c.has_service("accumulomaster")
assert not c.has_service("fluo")
assert c.get_service_hostnames("worker") == [
"worker1",
"worker2",
"worker3",
"worker4",
]
assert c.get_service_hostnames("zookeeper") == ["leader1"]
assert c.get_hosts() == {
"leader2": ("10.0.0.1", None),
"leader1": ("10.0.0.0", "23.0.0.0"),
"worker1": ("10.0.0.2", None),
"worker3": ("10.0.0.4", None),
"worker2": ("10.0.0.3", None),
"worker4": ("10.0.0.5", None),
}
assert c.get_public_ip("leader1") == "23.0.0.0"
assert c.get_private_ip("leader1") == "10.0.0.0"
assert c.cluster_name == "mycluster"
assert c.get_cluster_type() == "ec2"
assert c.version("accumulo").startswith("2.")
assert c.version("fluo").startswith("1.")
assert c.version("hadoop").startswith("3.")
assert c.version("zookeeper").startswith("3.")
assert c.get_service_private_ips("worker") == [
"10.0.0.2",
"10.0.0.3",
"10.0.0.4",
"10.0.0.5",
]
assert c.get("general", "proxy_hostname") == "leader1"
assert c.proxy_public_ip() == "23.0.0.0"
assert c.proxy_private_ip() == "10.0.0.0"
assert c.get("general", "cluster_user") == "centos"
assert c.get("general", "cluster_group") == "centos"
assert c.get_non_proxy() == [
("10.0.0.1", "leader2"),
("10.0.0.2", "worker1"),
("10.0.0.3", "worker2"),
("10.0.0.4", "worker3"),
("10.0.0.5", "worker4"),
]
assert c.get_host_services() == [
("leader1", "namenode resourcemanager accumulomaster zookeeper"),
("leader2", "metrics"),
("worker1", "worker swarmmanager"),
("worker2", "worker"),
("worker3", "worker"),
("worker4", "worker"),
]
def test_case_sensitive():
c = Ec2DeployConfig(
"muchos",
"../conf/muchos.props.example",
"../conf/hosts/example/example_cluster",
"../conf/checksums",
"../conf/templates",
"mycluster",
)
assert c.has_option("ec2", "default_instance_type")
assert not c.has_option("ec2", "Default_instance_type")
c.set("nodes", "CamelCaseWorker", "worker,fluo")
c.init_nodes()
assert c.get_node("CamelCaseWorker") == ["worker", "fluo"]
def test_ec2_cluster_template():
c = Ec2DeployConfig(
"muchos",
"../conf/muchos.props.example",
"../conf/hosts/example/example_cluster",
"../conf/checksums",
"../conf/templates",
"mycluster",
)
c.set("ec2", "cluster_template", "example")
c.init_template("../conf/templates")
# init_template already calls validate_template, so just ensure that
# we've loaded all the expected dictionary items from the example
assert "accumulomaster" in c.cluster_template_d
assert "client" in c.cluster_template_d
assert "metrics" in c.cluster_template_d
assert "namenode" in c.cluster_template_d
assert "resourcemanager" in c.cluster_template_d
assert "worker" in c.cluster_template_d
assert "zookeeper" in c.cluster_template_d
assert "devices" in c.cluster_template_d
|
apache-2.0
| -129,113,717,068,885,630
| 35.452381
| 74
| 0.595526
| false
| 3.199582
| false
| false
| false
|
jabesq/home-assistant
|
homeassistant/components/amcrest/camera.py
|
1
|
17637
|
"""Support for Amcrest IP cameras."""
import asyncio
from datetime import timedelta
import logging
from urllib3.exceptions import HTTPError
from amcrest import AmcrestError
import voluptuous as vol
from homeassistant.components.camera import (
Camera, CAMERA_SERVICE_SCHEMA, SUPPORT_ON_OFF, SUPPORT_STREAM)
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (
CONF_NAME, STATE_ON, STATE_OFF)
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_stream, async_aiohttp_proxy_web,
async_get_clientsession)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
CAMERA_WEB_SESSION_TIMEOUT, CAMERAS, DATA_AMCREST, DEVICES, SERVICE_UPDATE)
from .helpers import log_update_error, service_signal
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=15)
STREAM_SOURCE_LIST = [
'snapshot',
'mjpeg',
'rtsp',
]
_SRV_EN_REC = 'enable_recording'
_SRV_DS_REC = 'disable_recording'
_SRV_EN_AUD = 'enable_audio'
_SRV_DS_AUD = 'disable_audio'
_SRV_EN_MOT_REC = 'enable_motion_recording'
_SRV_DS_MOT_REC = 'disable_motion_recording'
_SRV_GOTO = 'goto_preset'
_SRV_CBW = 'set_color_bw'
_SRV_TOUR_ON = 'start_tour'
_SRV_TOUR_OFF = 'stop_tour'
_ATTR_PRESET = 'preset'
_ATTR_COLOR_BW = 'color_bw'
_CBW_COLOR = 'color'
_CBW_AUTO = 'auto'
_CBW_BW = 'bw'
_CBW = [_CBW_COLOR, _CBW_AUTO, _CBW_BW]
_SRV_GOTO_SCHEMA = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(_ATTR_PRESET): vol.All(vol.Coerce(int), vol.Range(min=1)),
})
_SRV_CBW_SCHEMA = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(_ATTR_COLOR_BW): vol.In(_CBW),
})
CAMERA_SERVICES = {
_SRV_EN_REC: (CAMERA_SERVICE_SCHEMA, 'async_enable_recording', ()),
_SRV_DS_REC: (CAMERA_SERVICE_SCHEMA, 'async_disable_recording', ()),
_SRV_EN_AUD: (CAMERA_SERVICE_SCHEMA, 'async_enable_audio', ()),
_SRV_DS_AUD: (CAMERA_SERVICE_SCHEMA, 'async_disable_audio', ()),
_SRV_EN_MOT_REC: (
CAMERA_SERVICE_SCHEMA, 'async_enable_motion_recording', ()),
_SRV_DS_MOT_REC: (
CAMERA_SERVICE_SCHEMA, 'async_disable_motion_recording', ()),
_SRV_GOTO: (_SRV_GOTO_SCHEMA, 'async_goto_preset', (_ATTR_PRESET,)),
_SRV_CBW: (_SRV_CBW_SCHEMA, 'async_set_color_bw', (_ATTR_COLOR_BW,)),
_SRV_TOUR_ON: (CAMERA_SERVICE_SCHEMA, 'async_start_tour', ()),
_SRV_TOUR_OFF: (CAMERA_SERVICE_SCHEMA, 'async_stop_tour', ()),
}
_BOOL_TO_STATE = {True: STATE_ON, False: STATE_OFF}
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up an Amcrest IP Camera."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
device = hass.data[DATA_AMCREST][DEVICES][name]
async_add_entities([
AmcrestCam(name, device, hass.data[DATA_FFMPEG])], True)
class AmcrestCam(Camera):
"""An implementation of an Amcrest IP camera."""
def __init__(self, name, device, ffmpeg):
"""Initialize an Amcrest camera."""
super().__init__()
self._name = name
self._api = device.api
self._ffmpeg = ffmpeg
self._ffmpeg_arguments = device.ffmpeg_arguments
self._stream_source = device.stream_source
self._resolution = device.resolution
self._token = self._auth = device.authentication
self._control_light = device.control_light
self._is_recording = False
self._motion_detection_enabled = None
self._brand = None
self._model = None
self._audio_enabled = None
self._motion_recording_enabled = None
self._color_bw = None
self._rtsp_url = None
self._snapshot_lock = asyncio.Lock()
self._unsub_dispatcher = []
self._update_succeeded = False
async def async_camera_image(self):
"""Return a still image response from the camera."""
available = self.available
if not available or not self.is_on:
_LOGGER.warning(
                'Attempt to take snapshot when %s camera is %s', self.name,
'offline' if not available else 'off')
return None
async with self._snapshot_lock:
try:
# Send the request to snap a picture and return raw jpg data
response = await self.hass.async_add_executor_job(
self._api.snapshot)
return response.data
except (AmcrestError, HTTPError) as error:
log_update_error(
_LOGGER, 'get image from', self.name, 'camera', error)
return None
async def handle_async_mjpeg_stream(self, request):
"""Return an MJPEG stream."""
# The snapshot implementation is handled by the parent class
if self._stream_source == 'snapshot':
return await super().handle_async_mjpeg_stream(request)
if not self.available:
_LOGGER.warning(
'Attempt to stream %s when %s camera is offline',
self._stream_source, self.name)
return None
if self._stream_source == 'mjpeg':
# stream an MJPEG image stream directly from the camera
websession = async_get_clientsession(self.hass)
streaming_url = self._api.mjpeg_url(typeno=self._resolution)
stream_coro = websession.get(
streaming_url, auth=self._token,
timeout=CAMERA_WEB_SESSION_TIMEOUT)
return await async_aiohttp_proxy_web(
self.hass, request, stream_coro)
# streaming via ffmpeg
from haffmpeg.camera import CameraMjpeg
streaming_url = self._rtsp_url
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(
streaming_url, extra_cmd=self._ffmpeg_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass, request, stream_reader,
self._ffmpeg.ffmpeg_stream_content_type)
finally:
await stream.close()
# Entity property overrides
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def device_state_attributes(self):
"""Return the Amcrest-specific camera state attributes."""
attr = {}
if self._audio_enabled is not None:
attr['audio'] = _BOOL_TO_STATE.get(self._audio_enabled)
if self._motion_recording_enabled is not None:
attr['motion_recording'] = _BOOL_TO_STATE.get(
self._motion_recording_enabled)
if self._color_bw is not None:
attr[_ATTR_COLOR_BW] = self._color_bw
return attr
@property
def available(self):
"""Return True if entity is available."""
return self._api.available
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_ON_OFF | SUPPORT_STREAM
# Camera property overrides
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._is_recording
@property
def brand(self):
"""Return the camera brand."""
return self._brand
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._motion_detection_enabled
@property
def model(self):
"""Return the camera model."""
return self._model
async def stream_source(self):
"""Return the source of the stream."""
return self._rtsp_url
@property
def is_on(self):
"""Return true if on."""
return self.is_streaming
# Other Entity method overrides
async def async_on_demand_update(self):
"""Update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Subscribe to signals and add camera to list."""
for service, params in CAMERA_SERVICES.items():
self._unsub_dispatcher.append(async_dispatcher_connect(
self.hass,
service_signal(service, self.entity_id),
getattr(self, params[1])))
self._unsub_dispatcher.append(async_dispatcher_connect(
self.hass, service_signal(SERVICE_UPDATE, self._name),
self.async_on_demand_update))
self.hass.data[DATA_AMCREST][CAMERAS].append(self.entity_id)
async def async_will_remove_from_hass(self):
"""Remove camera from list and disconnect from signals."""
self.hass.data[DATA_AMCREST][CAMERAS].remove(self.entity_id)
for unsub_dispatcher in self._unsub_dispatcher:
unsub_dispatcher()
def update(self):
"""Update entity status."""
if not self.available or self._update_succeeded:
if not self.available:
self._update_succeeded = False
return
_LOGGER.debug('Updating %s camera', self.name)
try:
if self._brand is None:
resp = self._api.vendor_information.strip()
if resp.startswith('vendor='):
self._brand = resp.split('=')[-1]
else:
self._brand = 'unknown'
if self._model is None:
resp = self._api.device_type.strip()
if resp.startswith('type='):
self._model = resp.split('=')[-1]
else:
self._model = 'unknown'
self.is_streaming = self._api.video_enabled
self._is_recording = self._api.record_mode == 'Manual'
self._motion_detection_enabled = (
self._api.is_motion_detector_on())
self._audio_enabled = self._api.audio_enabled
self._motion_recording_enabled = (
self._api.is_record_on_motion_detection())
self._color_bw = _CBW[self._api.day_night_color]
self._rtsp_url = self._api.rtsp_url(typeno=self._resolution)
except AmcrestError as error:
log_update_error(
_LOGGER, 'get', self.name, 'camera attributes', error)
self._update_succeeded = False
else:
self._update_succeeded = True
# Other Camera method overrides
def turn_off(self):
"""Turn off camera."""
self._enable_video_stream(False)
def turn_on(self):
"""Turn on camera."""
self._enable_video_stream(True)
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
self._enable_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
self._enable_motion_detection(False)
# Additional Amcrest Camera service methods
async def async_enable_recording(self):
"""Call the job and enable recording."""
await self.hass.async_add_executor_job(self._enable_recording, True)
async def async_disable_recording(self):
"""Call the job and disable recording."""
await self.hass.async_add_executor_job(self._enable_recording, False)
async def async_enable_audio(self):
"""Call the job and enable audio."""
await self.hass.async_add_executor_job(self._enable_audio, True)
async def async_disable_audio(self):
"""Call the job and disable audio."""
await self.hass.async_add_executor_job(self._enable_audio, False)
async def async_enable_motion_recording(self):
"""Call the job and enable motion recording."""
await self.hass.async_add_executor_job(self._enable_motion_recording,
True)
async def async_disable_motion_recording(self):
"""Call the job and disable motion recording."""
await self.hass.async_add_executor_job(self._enable_motion_recording,
False)
async def async_goto_preset(self, preset):
"""Call the job and move camera to preset position."""
await self.hass.async_add_executor_job(self._goto_preset, preset)
async def async_set_color_bw(self, color_bw):
"""Call the job and set camera color mode."""
await self.hass.async_add_executor_job(self._set_color_bw, color_bw)
async def async_start_tour(self):
"""Call the job and start camera tour."""
await self.hass.async_add_executor_job(self._start_tour, True)
async def async_stop_tour(self):
"""Call the job and stop camera tour."""
await self.hass.async_add_executor_job(self._start_tour, False)
# Methods to send commands to Amcrest camera and handle errors
def _enable_video_stream(self, enable):
"""Enable or disable camera video stream."""
# Given the way the camera's state is determined by
# is_streaming and is_recording, we can't leave
# recording on if video stream is being turned off.
if self.is_recording and not enable:
self._enable_recording(False)
try:
self._api.video_enabled = enable
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera video stream', error)
else:
self.is_streaming = enable
self.schedule_update_ha_state()
if self._control_light:
self._enable_light(self._audio_enabled or self.is_streaming)
def _enable_recording(self, enable):
"""Turn recording on or off."""
# Given the way the camera's state is determined by
# is_streaming and is_recording, we can't leave
# video stream off if recording is being turned on.
if not self.is_streaming and enable:
self._enable_video_stream(True)
rec_mode = {'Automatic': 0, 'Manual': 1}
try:
self._api.record_mode = rec_mode[
'Manual' if enable else 'Automatic']
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera recording', error)
else:
self._is_recording = enable
self.schedule_update_ha_state()
def _enable_motion_detection(self, enable):
"""Enable or disable motion detection."""
try:
self._api.motion_detection = str(enable).lower()
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera motion detection', error)
else:
self._motion_detection_enabled = enable
self.schedule_update_ha_state()
def _enable_audio(self, enable):
"""Enable or disable audio stream."""
try:
self._api.audio_enabled = enable
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera audio stream', error)
else:
self._audio_enabled = enable
self.schedule_update_ha_state()
if self._control_light:
self._enable_light(self._audio_enabled or self.is_streaming)
def _enable_light(self, enable):
"""Enable or disable indicator light."""
try:
self._api.command(
'configManager.cgi?action=setConfig&LightGlobal[0].Enable={}'
.format(str(enable).lower()))
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'indicator light', error)
def _enable_motion_recording(self, enable):
"""Enable or disable motion recording."""
try:
self._api.motion_recording = str(enable).lower()
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera motion recording', error)
else:
self._motion_recording_enabled = enable
self.schedule_update_ha_state()
def _goto_preset(self, preset):
"""Move camera position and zoom to preset."""
try:
self._api.go_to_preset(
action='start', preset_point_number=preset)
except AmcrestError as error:
log_update_error(
_LOGGER, 'move', self.name,
'camera to preset {}'.format(preset), error)
def _set_color_bw(self, cbw):
"""Set camera color mode."""
try:
self._api.day_night_color = _CBW.index(cbw)
except AmcrestError as error:
log_update_error(
_LOGGER, 'set', self.name,
'camera color mode to {}'.format(cbw), error)
else:
self._color_bw = cbw
self.schedule_update_ha_state()
def _start_tour(self, start):
"""Start camera tour."""
try:
self._api.tour(start=start)
except AmcrestError as error:
log_update_error(
_LOGGER, 'start' if start else 'stop', self.name,
'camera tour', error)
|
apache-2.0
| 6,563,436,508,412,655,000
| 35.515528
| 79
| 0.594829
| false
| 3.952712
| false
| false
| false
|
IskyN/submeter-bill-generator
|
get_submeter_data.py
|
1
|
6582
|
from sys import stdout
from os import makedirs
from os.path import exists, abspath
from requests import Session
from datetime import datetime, timedelta
from getpass import getpass
periods_path = abspath(__file__ + "/../periods.txt")
site_url = "http://meterdata.submetersolutions.com"
login_url = "/login.php"
file_url = "/consumption_csv.php"
terminal = stdout.isatty() # not all functions work on PyCharm
def get_data(site_id, site_name, period=None):
"""
Access the online submeter database to download and save
data for a given (or asked) period.
Requires authentication.
:param str site_id: the looked-up "SiteID" param in the data query string
:param str site_name: the "SiteName" param in the data query string
:param str|List period: the month(s) to get data for (or formatted periods)
:return:
"""
# Get period to process (if not given)
if not period or not isinstance(period, list):
period = period or input("Enter a period to get data for: ")
periods = []
months = 0
try:
if len(period) == 7: # one month
start = datetime.strptime(period, "%b%Y")
end = last_day_of_month(start)
periods.append((start, end))
months += 1
else: # a period
first = datetime.strptime(period[:7], "%b%Y")
last = datetime.strptime(period[-7:], "%b%Y")
months += (last.year - first.year)*12 + \
(last.month - first.month + 1)
start = first
for _ in range(months):
end = last_day_of_month(start)
periods.append((start, end))
start = next_month(start)
except ValueError as e:
raise Exception("Incorrect period format. Accepted formats:\n"
"\tJan2016 (single month)\n"
"\tJan2016-Feb2017 (range of months)") from e
else: # properly formatted list
periods = period
months = len(periods)
# print(*periods, sep="\n")
if not exists("Data"):
makedirs("Data")
username = input("Username: ")
password = getpass() if terminal else input("Password: ")
# (Thanks to tigerFinch @ http://stackoverflow.com/a/17633072)
# Fill in your details here to be posted to the login form.
login_payload = {"txtUserName": username,
"txtPassword": password,
"btnLogin": "Login"}
query_string = {"SiteID": site_id,
"SiteName": site_name}
# print(query_string)
# Use 'with' to ensure the session context is closed after use.
with Session() as session:
response = session.post(site_url + login_url, data=login_payload)
assert response.status_code == 200, "Error from data server"
# print("url: {}".format(response.url))
assert response.url == site_url + "/propertylist.php", \
"Incorrect username/password"
update_progress_bar(0) # start progress bar
for idx, (start, end) in enumerate(periods):
if end - start > timedelta(days=55): # more than 1 month
x = start + timedelta(days=3) # actual month
y = end - timedelta(days=3) # actual month
period = "{}-{}_data.csv".format(x.strftime("%b%Y"),
y.strftime("%b%Y"))
else:
period = midpoint_day(start, end).strftime("Data/%b%Y_data.csv")
# Submeter Solutions uses inclusive dates, but City doesn't, so exclude "ToDate":
end = end - timedelta(days=1)
query_string["FromDate"] = start.strftime("%m/%d/%Y")
query_string["ToDate"] = end.strftime("%m/%d/%Y")
# print(period, ':',
# query_string["FromDate"], '-', query_string["ToDate"])
# An authorised request.
response = session.get(site_url + file_url, params=query_string)
assert response.status_code == 200, "Error from data server"
with open(period, 'xb') as f:
f.write(response.content)
update_progress_bar((idx+1) / months)
print("Data download complete. See 'Data' folder for files.")
def next_month(date):
month_after = date.replace(day=28) + timedelta(days=4) # never fails
return month_after.replace(day=1)
def last_day_of_month(date):
"""
Return the last day of the given month (leap year-sensitive),
with date unchanged.
Thanks to Augusto Men: http://stackoverflow.com/a/13565185
:param datetime date: the first day of the given month
:return: datetime
>>> d = datetime(2012, 2, 1)
>>> last_day_of_month(d)
datetime.datetime(2012, 2, 29, 0, 0)
>>> d.day == 1
True
"""
month_after = next_month(date)
return month_after - timedelta(days=month_after.day)
def midpoint_day(date1, date2):
"""
Finds the midpoint between two dates. (Rounds down.)
:type date1: datetime
:type date2: datetime
:return: datetime
>>> d1 = datetime(2016, 1, 1)
>>> d2 = datetime(2016, 1, 6)
>>> midpoint_day(d1, d2)
datetime.datetime(2016, 1, 3, 0, 0)
"""
if date1 > date2:
date1, date2 = date2, date1
return (date1 + (date2 - date1) / 2).replace(hour=0)
def update_progress_bar(percent: float):
if not terminal: # because PyCharm doesn't treat '\r' well
print("[{}{}]".format('#' * int(percent * 20),
' ' * (20 - int(percent * 20))))
elif percent == 1:
print("Progress: {:3.1%}".format(percent))
else:
print("Progress: {:3.1%}\r".format(percent), end="")
if __name__ == "__main__":
if not terminal:
print("WARNING: This is not a TTY/terminal. "
"Passwords will not be hidden.")
if periods_path and exists(periods_path):
p = []
with open(periods_path, 'r') as pf:
for line in pf:
if line[0] != '#': # skip comment lines
top, pot = line.split()[:2] # ignore inline comments
top = datetime.strptime(top, "%Y-%m-%d")
pot = datetime.strptime(pot, "%Y-%m-%d")
assert top < pot, "Improper period range (start !< end)"
p.append((top, pot))
get_data("128", "Brimley Plaza", p)
else:
get_data("128", "Brimley Plaza")
|
apache-2.0
| 4,653,421,852,300,319,000
| 36.827586
| 93
| 0.558037
| false
| 3.855888
| false
| false
| false
|
Glottotopia/aagd
|
moin/local/moin/MoinMoin/macro/_tests/test_Hits.py
|
1
|
3829
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinMoin.macro Hits tested
@copyright: 2007-2008 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import os
from MoinMoin import caching, macro
from MoinMoin.logfile import eventlog
from MoinMoin.PageEditor import PageEditor
from MoinMoin.Page import Page
from MoinMoin._tests import become_trusted, create_page, make_macro, nuke_eventlog, nuke_page
class TestHits:
"""Hits: testing Hits macro """
pagename = u'AutoCreatedMoinMoinTemporaryTestPageForHits'
def setup_class(self):
request = self.request
become_trusted(request)
self.page = create_page(request, self.pagename, u"Foo!")
# for that test eventlog needs to be empty
nuke_eventlog(request)
# hits is based on hitcounts which reads the cache
caching.CacheEntry(request, 'charts', 'hitcounts', scope='wiki').remove()
def teardown_class(self):
nuke_page(self.request, self.pagename)
def _test_macro(self, name, args):
m = make_macro(self.request, self.page)
return m.execute(name, args)
def _cleanStats(self):
# cleans all involved cache and log files
nuke_eventlog(self.request)
# hits is based on hitcounts which reads the cache
caching.CacheEntry(self.request, 'charts', 'hitcounts', scope='wiki').remove()
arena = Page(self.request, self.pagename)
caching.CacheEntry(self.request, arena, 'hitcounts', scope='item').remove()
def testHitsNoArg(self):
""" macro Hits test: 'no args for Hits (Hits is executed on current page) """
# <count> log entries for the current page and one for WikiSandBox simulating viewing
count = 3
eventlog.EventLog(self.request).add(self.request, 'VIEWPAGE', {'pagename': 'WikiSandBox'})
for i in range(count):
eventlog.EventLog(self.request).add(self.request, 'VIEWPAGE', {'pagename': self.pagename})
result = self._test_macro(u'Hits', u'')
self._cleanStats()
assert result == str(count)
def testHitsForAll(self):
""" macro Hits test: 'all=True' for Hits (all pages are counted for VIEWPAGE) """
# <count> * <num_pages> log entries for simulating viewing
pagenames = ['WikiSandBox', self.pagename]
num_pages = len(pagenames)
count = 2
for i in range(count):
for pagename in pagenames:
eventlog.EventLog(self.request).add(self.request, 'VIEWPAGE', {'pagename': pagename})
result = self._test_macro(u'Hits', u'all=True')
self._cleanStats()
assert result == str(count * num_pages)
def testHitsForFilter(self):
""" macro Hits test: 'event_type=SAVEPAGE' for Hits (SAVEPAGE counted for current page)"""
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': self.pagename})
        # simulate a log entry SAVEPAGE for WikiSandBox to distinguish current page
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': 'WikiSandBox'})
result = self._test_macro(u'Hits', u'event_type=SAVEPAGE')
self._cleanStats()
assert result == "1"
def testHitsForAllAndFilter(self):
""" macro test: 'all=True, event_type=SAVEPAGE' for Hits (all pages are counted for SAVEPAGE)"""
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': 'WikiSandBox'})
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': self.pagename})
result = self._test_macro(u'Hits', u'all=True, event_type=SAVEPAGE')
self._cleanStats()
assert result == "2"
coverage_modules = ['MoinMoin.macro.Hits']
|
mit
| 7,299,168,824,747,804,000
| 43.047059
| 104
| 0.64116
| false
| 3.51607
| true
| false
| false
|
briennakh/BIOF509
|
Wk02/genetic_algorithm_optimizer.py
|
1
|
5552
|
"""Module to calculate best path between multiple points, using genetic algorithm
Functions:
new_path -- path altering function that creates a new path
distance -- cost function that calculates distance as the cost of a path
select_best -- function that selects the best paths in a population
recombine -- path altering function that returns a child path recombined from two parent paths
genetic_algorithm_optimizer -- objective function that implements the genetic algorithm
"""
import random
def new_path(existing_path):
"""Switch two random consecutive points on a path
Arguments received:
existing_path -- list of coordinates, e.g. [(0, 0), (10, 5), (10, 10)], representing a path
Arguments returned:
path -- list of coordinates representing the mutated path
"""
path = existing_path[:]
point = random.randint(0, len(path)-2) # randomly choose a point between 1st and 2nd-to-last points on path
path[point+1], path[point] = path[point], path[point+1] # switch this point with the next point
return path
def distance(coords):
"""Calculate the distance of a path between multiple points
Arguments received:
    coords -- list of coordinates representing a path
Arguments returned:
distance -- total distance as a float
"""
distance = 0
for p1, p2 in zip(coords[:-1], coords[1:]):
distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
return distance
def select_best(population, cost_func, num_to_keep):
"""Select a given number of paths with the lowest cost (the best paths)
Arguments received:
population -- an array of lists of coordinates representing paths
cost_func -- function to calculate cost of a path
num_to_keep -- number of paths to select
Arguments returned:
[i[0] for i in scored_population[:num_to_keep]] -- an array of lists of coordinates representing the best paths
"""
scored_population = [(i, cost_func(i)) for i in population] # create a list of tuples: (path, cost)
scored_population.sort(key=lambda x: x[1]) # sort list by cost, lowest to highest
return [i[0] for i in scored_population[:num_to_keep]] # return num_to_keep paths with the lowest cost
def recombine(population):
"""Cross over two parent paths and return the resulting child path
Arguments received:
population -- an array of lists of coordinates representing paths
Arguments returned:
child -- list of coordinates representing a recombined child path
"""
# Randomly choose two parents
    options = list(range(len(population)))  # indices of all paths in the population
random.shuffle(options)
partner1 = options[0]
partner2 = options[1]
# Choose a split point, take the first parent's order to that split point,
# then the second parent's order for all remaining points
split_point = random.randint(0, len(population[0])-1)
child = population[partner1][:split_point]
for point in population[partner2]:
if point not in child:
child.append(point)
return child
# Our genetic algorithm function currently only uses recombination. As we saw from the simulated
# annealing approach mutation is also a powerful tool in locating the optimal solution.
# Add mutation to the genetic algorithm function using the new_path function we created.
def genetic_algorithm_optimizer(starting_path, cost_func, new_path_func, pop_size, generations):
"""Calculate the best path between multiple points using a genetic algorithm
The genetic algorithm begins with a given path, which it shuffles to create a starting population of a given
size. Once the population is generated, the cost of each path is evaluated. The top 25 percent then are sent
through recombination, then mutation -- to hopefully generate 'better' paths -- to form a new population.
    Arguments received:
starting_path -- list of coordinates representing a path
cost_func -- function to calculate cost of a path
new_path_func -- function to generate a new path with two random consecutive points switched
pop_size -- population size, or amount of paths in one generation
generations -- number of iterations
Arguments returned:
population[0] -- list of coordinates representing the best path
cost_func(population[0]) -- cost of the best path
history -- an array of objects, each object containing information about each tested path
"""
# Create a starting population of 500 paths by randomly shuffling the points
population = []
for i in range(pop_size):
new_path = starting_path[:]
random.shuffle(new_path)
population.append(new_path)
history = []
# Take the top 25% of routes and recombine to create new routes, repeating for generations
for i in range(generations):
pop_best = select_best(population, cost_func, int(pop_size / 4))
new_population = []
mutated_population = []
        for j in range(pop_size):  # separate index so the outer generation counter i is not shadowed
            new_population.append(recombine(pop_best))
            if random.random() <= 1.0 / len(new_population[j]):  # mutation probability, 1/path length
                mutated_population.append(new_path_func(new_population[j]))  # mutate
            else:
                mutated_population.append(new_population[j])  # don't mutate
population = mutated_population
record = {'generation': i, 'current_cost': cost_func(population[0]), }
history.append(record)
return (population[0], cost_func(population[0]), history)
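# A minimal usage sketch; the coordinates, population size and generation count
# below are arbitrary example values.
if __name__ == '__main__':
    example_points = [(0, 0), (10, 5), (10, 10), (5, 2), (3, 8), (7, 7)]
    best_path, best_cost, history = genetic_algorithm_optimizer(
        starting_path=example_points,
        cost_func=distance,
        new_path_func=new_path,
        pop_size=100,
        generations=50)
    print('Best path found: {}'.format(best_path))
    print('Cost of best path: {}'.format(best_cost))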
|
mit
| 957,501,307,047,593,300
| 42.359375
| 115
| 0.698018
| false
| 4.176072
| false
| false
| false
|
amkahn/event-extraction
|
extract_events.py
|
1
|
12233
|
#!/usr/bin/python
# Written by Andrea Kahn
# Last updated Aug. 29, 2014
'''
This script takes as input:
1) A path to a file containing patients' clinic notes, each line having the format: MRN [tab] date [tab] description [tab] note (one note per line; date must be in format YYYY-MM-DD, YYYY-MM, or YYYY)
2) A path to the file containing keywords to search on, each line having the format: keyword [tab] position ([tab] window size), where position is PRE-DATE or POST-DATE and the parenthesized content is optional (default window size = 100) (NB: casing of keywords is ignored)
3) Optionally, a float corresponding to the minimum score a date candidate must have in order to be output (default = 0.0)
4) Optionally, an int corresponding to the minimum number of dates to be output, regardless of whether they all have the minimum score (NB: if the number of date candidates extracted is lower than this int, only the number of date candidates extracted will be output) (default = 0)
It then extracts dates correlated with the keywords from the patients' clinic notes and prints to standard out lines in the following format (one line per patient):
MRN [tab] date1 [tab] score1 [tab] date2 [tab] score2 ...
...where MRNs are sorted alphabetically, and dates for a particular patient appear in descending order by score.
To switch to verbose output (lists of supporting snippets are printed after scores), comment line 61 and uncomment line 62.
'''
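# Illustrative input and output lines (the MRN, dates, snippet text and score
# below are hypothetical; they only demonstrate the formats described above):
#
#   notes file line:    12345 <tab> 2013-04-02 <tab> oncology note <tab> Patient was diagnosed on 03/15/2013 ...
#   keywords file line: diagnosed <tab> PRE-DATE <tab> 150
#   output line:        12345 <tab> 2013-03-15 <tab> 1.0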
from sys import argv
import logging
import re
from date import *
from date_candidate import *
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.WARNING)
def main():
logging.basicConfig()
notes_filename = argv[1]
keywords_filename = argv[2]
    if len(argv) > 3:
        filter = float(argv[3])  # minimum score arrives as a string on the command line
        if len(argv) > 4:
            n = int(argv[4])  # minimum number of dates to output
        else:
            n = 0
    else:
        filter = 0.0
        n = 0
notes_file = open(notes_filename)
notes_dict = get_notes_dict(notes_file)
notes_file.close()
LOG.debug("Here is the notes dictionary: %s" % notes_dict)
keywords_file = open(keywords_filename)
keywords_list = get_keywords_list(keywords_file)
keywords_file.close()
LOG.debug("Here is the keywords list: %s" % keywords_list)
extracted = {}
for MRN in notes_dict:
extracted[MRN] = extract_events(notes_dict[MRN], keywords_list, filter, n)
print_output(extracted)
# print_output(extracted, True)
class ClinicNote(object):
'''
A ClinicNote has attributes 'date' (a Date object corresponding to the document creation date), 'desc' (a string corresponding to the description of the clinic note), and 'text' (a text blob corresponding to the contents of the note).
'''
def __init__(self, date, desc, text):
self.date = date
self.desc = desc
self.text = text
def __repr__(self):
return "date: %s; desc: %s" % (self.date, self.desc)
# return "date: %s; desc: %s; text: %s" % (self.date, self.desc, self.text)
class Keyword(object):
'''
A Keyword has 'text' (the keyword itself), 'position' (the string 'PRE-DATE' or 'POST-DATE'), an int 'window' (the number of characters before or after the keyword in which to look for a date). The last attribute, if not passed into the __init__ method, defaults to 100.
'''
def __init__(self, text, position, window=100):
if position not in ['PRE-DATE', 'POST-DATE']:
LOG.warning("Bad position value %s; setting position to None)" % str(position))
self.text = text
self.position = position
self.window = int(window)
def __repr__(self):
return "(%s, %s, %s)" % (self.text, self.position, str(self.window))
def get_notes_dict(file):
'''
This method takes as input an open file object and returns a dictionary of MRNs mapped to lists of ClinicNote objects corresponding to the clinic notes for that patient.
'''
notes_dict = {}
for line in file:
line_elements = line.strip().split('\t')
if len(line_elements) not in [3, 4]:
LOG.warning("Bad notes file line format; skipping: %s" % line)
else:
if len(line_elements) == 3:
note = ClinicNote(line_elements[1], line_elements[2], '')
else:
note = ClinicNote(line_elements[1], line_elements[2], line_elements[3])
if notes_dict.get(line_elements[0]):
notes_dict[line_elements[0]].append(note)
else:
notes_dict[line_elements[0]] = [note]
return notes_dict
def get_keywords_list(file):
'''
This method takes as input an open file object and returns a list of Keyword objects.
'''
keywords = []
for line in file:
line_elements = line.strip().split('\t')
if len(line_elements) not in [2, 3]:
LOG.warning("Bad keywords file line format; skipping: %s" % line)
else:
text = line_elements[0]
position = line_elements[1]
if len(line_elements) == 3:
keyword = Keyword(text, position, line_elements[2])
else:
keyword = Keyword(text, position)
keywords.append(keyword)
return keywords
def extract_events(notes_list, keywords_list, filter=0.0, n=0):
'''
This function takes as input a list of ClinicNote objects, a list of Keyword objects, an optional minimum confidence score (float; default = 0.0), and an optional int 'n' referring to the minimum number of candidate dates to be returned (default = 0), and returns a list of DateCandidate objects corresponding with date expressions that the system has identified in the patient's clinic notes based on Keyword objects.
'''
extracted = get_date_candidates(notes_list, keywords_list)
rerank_candidates(extracted, filter, n)
return extracted
def naive_extract_events(notes):
'''
This function takes as input a list of ClinicNote objects and returns a list of DateCandidate objects corresponding with ALL date expressions that the system has identified in the patient's clinic notes. (Not called in current code, it is intended to be used to establish a recall ceiling for evaluation -- i.e., to see how many of the gold dates actually appear in the notes at all.)
'''
candidates = []
for note in notes:
dates = [x[0] for x in extract_dates_and_char_indices(note.text)]
for d in dates:
date_candidate = DateCandidate(d, [note.text])
candidates.append(date_candidate)
rerank_candidates(candidates)
return candidates
def get_date_candidates(notes, keywords):
'''
This method takes as input a list of ClinicNote objects and a list of Keyword objects. It then returns a list of DateCandidate objects representing dates that appear in the clinic notes correlated with the input keywords.
'''
candidates = []
pre_date_keywords = filter(lambda x: x.position=='PRE-DATE', keywords)
post_date_keywords = filter(lambda x: x.position=='POST-DATE', keywords)
LOG.debug("Here are the pre-date keywords: %s" % pre_date_keywords)
LOG.debug("Here are the post-date keywords: %s" % post_date_keywords)
# Store the window sizes in a dictionary that maps (keyword text, position) tuples to window sizes
window_sizes = {}
for keyword in keywords:
window_sizes[(keyword.text.lower(), keyword.position)] = keyword.window
if pre_date_keywords:
# pre_date_regex = re.compile('|'.join(['['+keyword[0].upper()+keyword[0]+']'+keyword[1:] for keyword in pre_date_keywords]))
pre_date_keywords = map(lambda w: ''.join(map(lambda x: '[' + x.upper() + x + ']', w.text)), pre_date_keywords)
pre_date_regex = re.compile('|'.join(pre_date_keywords))
if post_date_keywords:
# post_date_regex = re.compile('|'.join(['['+keyword[0].upper()+keyword[0]+']'+keyword[1:] for keyword in post_date_keywords]))
post_date_keywords = map(lambda w: ''.join(map(lambda x: '[' + x.upper() + x + ']', w.text)), post_date_keywords)
post_date_regex = re.compile('|'.join(post_date_keywords))
for note in notes:
if pre_date_keywords:
pre_date_matches = pre_date_regex.finditer(note.text)
for match in pre_date_matches:
LOG.debug("Found pre-date keyword match: %s" % match.group(0))
window_size = window_sizes[(match.group(0).lower(), 'PRE-DATE')]
# Set the window beginning at the start of the match to pre_date_window_size characters or all remaining characters, whichever is less
window = note.text[match.start(0):(match.end(0)+window_size)]
# Look for first date in window -- do not pass a period or the end of the text
snippet = re.split('[.]|[a-z],|dmitted|:.*:', window)[0]
LOG.debug("Looking for date in: %s" % snippet)
event_date_str = extract_date(snippet, 'first')
LOG.debug("Extracted: %s" % event_date_str)
if event_date_str:
LOG.debug("Found date expression: %s" % event_date_str)
event_dates = make_date(event_date_str)
# FIXME: Consider alternatives that keep coordinated dates together (or throw them out entirely)
if event_dates:
for event_date in event_dates:
date_candidate = DateCandidate(event_date, [snippet])
candidates.append(date_candidate)
else:
LOG.debug("No date expression found")
if post_date_keywords:
LOG.debug("Looking for postdate matches")
post_date_matches = post_date_regex.finditer(note.text)
for match in post_date_matches:
LOG.debug("Found post-date keyword match: %s" % match.group(0))
window_size = window_sizes[(match.group(0).lower(), 'POST-DATE')]
                # Set the window to include the event expression and the window_size characters before the event expression, or all preceding characters, whichever is less
                window = note.text[max(0, match.start(0) - window_size):match.end(0)]
# Look for the last date in the window -- do not pass a period
snippet = re.split('[.]|[a-z],|<%END%>|ischarge|dmitted.{20}', window)[-1]
LOG.debug("Looking for date in: %s" % snippet)
event_date_str = extract_date(snippet, 'last')
LOG.debug("Extracted: %s" % event_date_str)
if event_date_str:
LOG.debug("Found date expression: %s" % event_date_str)
event_dates = make_date(event_date_str)
if event_dates:
for event_date in event_dates:
date_candidate = DateCandidate(event_date, [snippet])
candidates.append(date_candidate)
return candidates
def print_output(output_dict, verbose=False):
'''
This method takes as input a hash of MRNs mapped to lists of DateCandidate objects and a boolean True or False specifying whether or not supporting snippets should be printed (default: False), and prints to standard out lines in the following format: MRN [tab] date1 [tab] score1 [tab] (snippets_list1 [tab]) date2 [tab] score2 (snippets_list2 [tab])... , where dates appear in descending order by score.
'''
for MRN in output_dict:
sorted_candidates = sorted(output_dict[MRN], key=lambda candidate: candidate.score, reverse=True)
if verbose:
print MRN+'\t'+'\t'.join([c.date.make_date_expression()+'\t'+str(c.score)+'\t'+str(c.snippets) for c in sorted_candidates])
else:
print MRN+'\t'+'\t'.join([c.date.make_date_expression()+'\t'+str(c.score) for c in sorted_candidates])
if __name__=='__main__':
main()
|
mit
| 6,596,118,473,499,229,000
| 45.340909
| 422
| 0.622578
| false
| 3.960181
| false
| false
| false
|
evfredericksen/gmapsbounds
|
gmapsbounds/reader.py
|
1
|
6819
|
from gmapsbounds import utils
from gmapsbounds import constants
from gmapsbounds import llpx
from gmapsbounds import polygon
def get_nodes(rgb_image):
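    """Return a list of Node objects for pixels whose colour marks a boundary,
    skipping the menu area reported by utils.get_menu_borders and any pixels
    adjacent to invalid colours."""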
nodes = []
width, height = rgb_image.size
do_not_check = set()
menu_borders = utils.get_menu_borders(rgb_image)
for x in range(width):
for y in range(height):
if ((y in range(menu_borders[0][0], menu_borders[0][1] + 1) and
x in range (menu_borders[1][0], menu_borders[1][1] + 1)) or
(x, y) in do_not_check):
continue
r, g, b = rgb_image.getpixel((x, y))
if [r, g, b] == constants.INVALID[2]:
exclude_surrounding_nodes(x, y, do_not_check, rgb_image)
if valid_color(r, g, b) and no_invalid_adjacent(x, y, rgb_image):
nodes.append(Node(x, y))
if not nodes:
raise RuntimeError('Could not detect a boundary around this location')
nodes[0].visited = True
return nodes
def prune_extra_nodes(polygons):
pruned_polygons = []
for poly in polygons:
if len(poly.nodes) < constants.MINIMUM_PRUNING_SIZE:
assert len(poly.nodes) > 2
pruned_polygons.append(poly)
continue
pruned = polygon.Polygon(poly.nodes[:2])
for node in poly.nodes[2:]:
if utils.get_distance(pruned.nodes[-1], node) <= 1:
continue
end_node = None
if utils.same_line(pruned.nodes[-2], pruned.nodes[-1], node):
end_node = node
else:
if end_node is None:
end_node = node
pruned.nodes.append(end_node)
if len(pruned.nodes) > 2:
pruned_polygons.append(pruned)
return pruned_polygons
def exclude_surrounding_nodes(x, y, nodes_to_exclude, rgb_im, depth=5):
for i in range(-depth, depth + 1):
for j in range(-depth, depth + 1):
            try:
                r, g, b = rgb_im.getpixel((x+i, y+j))
            except IndexError:
                # neighbouring pixel lies outside the image
                continue
            if [r, g, b] != constants.INVALID[2] or (i == 0 and j == 0):
                nodes_to_exclude.add((x+i, y+j))
def valid_color(r, g, b):
if ([r, g, b] in constants.VALID or
(r in constants.ALLOWABLE_RED and g in constants.ALLOWABLE_GREEN and
b in constants.ALLOWABLE_BLUE and abs(g - b) < constants.MAX_BLUE_GREEN_DIFFERENCE)):
return True
return False
def no_invalid_adjacent(x, y, image):
for i in range(-2, 3):
for j in range(-2, 3):
try:
r, g, b = image.getpixel((x + i, y + j))
if [r, g, b] in constants.INVALID or (r in constants.ALLOWABLE_RED and 100 > g == b):
return False
except IndexError:
return False
return True
def get_polygons(nodes, rgb_im):
polygons = []
unvisited = [node for node in nodes if node.visited is False]
while unvisited:
poly = polygon.Polygon()
current = unvisited[0]
current.visited = True
closest, distance = get_closest_unvisited_node(current, nodes, rgb_im)
if distance is not None and distance > constants.MAX_NODE_DIFFERENCE:
unvisited = unvisited[1:]
continue
while closest is not None:
poly.nodes.append(current)
current = closest
current.visited = True
closest, distance = get_closest_unvisited_node(current, nodes, rgb_im)
if closest is None:
break
i = -1
while distance > constants.MAX_NODE_DIFFERENCE:
if (current is poly.nodes[0] or i < -constants.MAX_NODE_BACKTRACK
or (utils.get_distance(poly.nodes[0], current) < constants.MAX_NODE_DIFFERENCE)):
closest = None
break
current = poly.nodes[i]
closest, distance = get_closest_unvisited_node(current, unvisited, rgb_im)
i -= 1
if len(poly.nodes) > 2:
polygons.append(poly)
unvisited = [node for node in nodes if node.visited is False]
return polygons
def prune_overlapping_nodes(polygons):
assert polygons
polygons = utils.sort_by_polygon_length(polygons)
polygons.reverse()
exterior_polygons = [polygons[0]]
for test_polygon in polygons[1:]:
starting_count = len(test_polygon.nodes)
for exterior_polygon in exterior_polygons:
exterior_nodes = test_polygon.get_exterior_nodes(exterior_polygon)
if not exterior_nodes:
if len(test_polygon.nodes) == starting_count:
exterior_polygon.inner = test_polygon
elif (exterior_polygon is exterior_polygons[-1] and
len(exterior_nodes) > 2 and
utils.get_distance(exterior_nodes[0], exterior_nodes[-1]) <=
constants.MAX_NODE_DIFFERENCE):
test_polygon.nodes = exterior_nodes
exterior_polygons.append(test_polygon)
break
return exterior_polygons
def get_closest_unvisited_node(current, nodes, rgb_im):
closest_node = None
shortest_distance = None
pos = nodes.index(current)
i = 1
go_up = True
go_down = True
while (0 <= pos - i or len(nodes) > pos + i) and (go_up or go_down):
for sign in [-1, 1]:
if sign == -1 and not go_down or sign == 1 and not go_up:
continue
index = pos + i*sign
if not 0 <= index < len(nodes):
continue
node = nodes[index]
if closest_node is not None:
if sign == -1 and shortest_distance < current.x - node.x:
go_down = False
elif sign == 1 and shortest_distance < node.x - current.x:
go_up = False
if node.visited:
continue
distance = utils.get_distance(nodes[pos], node)
distance *= utils.get_water_multiplier(current, node, rgb_im)
if shortest_distance is None or distance < shortest_distance:
closest_node = node
shortest_distance = distance
i += 1
return closest_node, shortest_distance
class Node:
def __init__(self, x, y):
self.x = x
self.y = y
self.location = None
self.visited = False
def get_lat_lng(self):
return llpx.pixels_to_lat_lng(self.location.offset[0] - self.location.pixcenter[0] + self.x,
self.location.offset[1] - self.location.pixcenter[1] + self.y, self.location.zoom)
def __str__(self):
return '<Node at {}, {}>'.format(self.x, self.y)
def __repr__(self):
return self.__str__()
|
mit
| 9,046,952,827,354,978,000
| 37.971429
| 101
| 0.555947
| false
| 3.775748
| true
| false
| false
|
arunchandramouli/fanofpython
|
code/features/datatypes/lists1.py
|
1
|
3368
|
'''
Aim :: To demonstrate the use of a list
Define a simple list, add values to it, then iterate over it and print it
A list consists of comma separated values which could be of any type
and is represented as [,,,,] .. all values are enclosed between '[' and ']'
** A list object is a mutable datatype which means it cannot be hashed
Anything that can be hashed can be set as a dictionary key **
Modifying an existing list will not result in a new list object,
and the memory address will not change either.
There are 2 scenarios of modification:
-> Edit the existing item
-> Both Mutable and Immutable datatypes can be edited, memory location not changed
-> Replace the existing item
-> Both mutable and immutable can be replaced
'''
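'''
A short illustration of the hashability point above (the values used are
arbitrary): an immutable tuple can be used as a dictionary key, while a
mutable list cannot.
'''
hash_demo = {}
hash_demo[(1, 2)] = "tuples are hashable, so this works"
try:
    hash_demo[[1, 2]] = "lists are mutable and unhashable"
except TypeError as err:
    print "Using a list as a dictionary key fails -> %s " % (err), '\n\n'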
'''
Empty Mutable Types ...
'''
list1 = []
dict1 = {}
set1 = set()
'''
Empty Immutable Types ...
'''
tuple1 = ()
str1 = ""
'''
Define a simple list with multiple datatypes
'''
def_list = [1,2,"1","100","Python","Anne","A!@345<>_()",True,False,{1:100,2:200,3:300},range(10)]
'''
Now create a variable
'''
vara = def_list
'''
Modification of vara will result in modifying def_list
'''
vara.append("Hero")
print "Address of vara and def_list %s and %s "%(id(vara),id(def_list)),'\n\n'
print "vara = %s "%(vara),'\n\n'
print "def_list = %s "%(def_list),'\n\n'
'''
Now creating a Partial Slice ...
When a slice is created partially, we are actually breaking a container
into pieces, hence the slice occupies a new memory location.
Hence modifying it will not affect the original container
'''
getmeasliceofit = def_list[3:]
print "Address of getmeasliceofit and def_list %s and %s "%(id(getmeasliceofit),id(def_list)),'\n\n'
print "getmeasliceofit = %s "%(getmeasliceofit),'\n\n'
print "def_list = %s "%(def_list),'\n\n'
'''
Now creating a Full Slice ...
When a slice is created fully, we are actually creating a container
which has its original values but represents the same address.
Hence modifying it will affect the original container
For example:
If you verify all of the addresses below, all of them except getmeasliceofit are the same
If I edit def_list[0:] = range(5), def_list will also get modified
Likewise, if I edit def_list[3:] = range(5), def_list will get modified
But if I edit getmeasliceofit, def_list will not get modified
'''
getmeasliceofit = def_list[:]
print "Address == ",id(def_list),'\n',id(def_list[3:]),'\n',id(getmeasliceofit),'\n',id(def_list[::]),'\n',id(def_list[0:]),'\n',id(def_list[:]),'\n'
'''
Modifying def_list[3:] will affect def_list , but modifying getmeasliceofit doesn't
This is because getmeasliceofit resides at a different memory location.
'''
print '\n\n' , def_list , '\n\n'
def_list[3:] = range(50)
getmeasliceofit = None
print def_list , '\n\n\n',def_list[3:],'\n\n' , getmeasliceofit,'\n\n\n'
print 'Analyze memory locations of mutables examples ... ... ','\n\n'
sayx = [1,2,3,4,5]
print id(sayx),'\n'
sayx = [4,5,6,7,8]
print id(sayx),'\n'
x = range(10)
print id(x),'\n'
x = range(10,50)
print id(x),'\n'
print 'Modify a mutable it shall still refer same location ... ... ','\n\n'
''' A Simple list '''
sayx = [1,2,3,4,5]
print id(sayx),'\n'
''' A Simple list modified - change element @ position 4 '''
sayx[4] = range(10)
print id(sayx),'\n'
|
gpl-3.0
| 158,409,647,628,030,750
| 19.542683
| 149
| 0.663005
| false
| 2.967401
| false
| false
| false
|
IntersectAustralia/dc2c
|
mecat/rifcs/publishservice.py
|
1
|
2428
|
from tardis.tardis_portal.publish.publishservice import PublishService
PARTY_RIFCS_FILENAME = "MyTARDIS-party-%s.xml"
COLLECTION_RIFCS_FILENAME = "MyTARDIS-%s-dataset-%s.xml"
class PartyPublishService(PublishService):
def get_template(self, type):
return self.provider.get_template(type=type)
def _remove_rifcs_from_oai_dir(self, oaipath):
#owner = self.experiment.created_by
#import os
#filename = os.path.join(oaipath, PARTY_RIFCS_FILENAME % owner.id)
#if os.path.exists(filename):
# os.remove(filename)
return
def _write_rifcs_to_oai_dir(self, oaipath):
from tardis.tardis_portal.xmlwriter import XMLWriter
xmlwriter = XMLWriter()
owner = self.experiment.created_by
xmlwriter.write_template_to_dir(oaipath, PARTY_RIFCS_FILENAME % owner.id,
self.get_template(type="party"), self.get_context())
class CollectionPublishService(PublishService):
def get_template(self, type):
return self.provider.get_template(type=type)
def remove_specific_rifcs(self, oaipath, dataset_id):
import os
filename = os.path.join(oaipath, COLLECTION_RIFCS_FILENAME % (self.experiment.id, dataset_id) )
if os.path.exists(filename):
os.remove(filename)
def _remove_rifcs_from_oai_dir(self, oaipath):
import os
datasets = self.experiment.dataset_set
for dataset_vals in datasets.values():
dataset_id = dataset_vals['id']
filename = os.path.join(oaipath, COLLECTION_RIFCS_FILENAME % (self.experiment.id, dataset_id) )
if os.path.exists(filename):
os.remove(filename)
def _write_rifcs_to_oai_dir(self, oaipath):
from tardis.tardis_portal.xmlwriter import XMLWriter
xmlwriter = XMLWriter()
datasets = self.experiment.dataset_set
for dataset_vals in datasets.values():
dataset_id = dataset_vals['id']
self.provider.set_dataset_id(dataset_id)
xmlwriter.write_template_to_dir(oaipath, COLLECTION_RIFCS_FILENAME %
(self.experiment.id, dataset_id),
self.get_template(type="dataset"),
self.get_context())
|
gpl-3.0
| -6,296,705,034,827,789,000
| 43.163636
| 107
| 0.601318
| false
| 3.829653
| false
| false
| false
|
CSIS/proccer
|
src/proccer/t/test_periodic.py
|
1
|
1030
|
from __future__ import with_statement
from datetime import datetime, timedelta
from mock import patch
from proccer.database import Job
from proccer.periodic import main
from proccer.t.testing import setup_module
def test_periodic():
still_bad_job = Job.create(session, 'foo', 'bar', 'baz')
still_bad_job.last_seen = still_bad_job.last_stamp = datetime(1979, 7, 7)
still_bad_job.state = 'error'
still_bad_job.warn_after = timedelta(seconds=1)
silent_bad_job = Job.create(session, 'foo', 'bar', 'baz')
silent_bad_job.last_seen = silent_bad_job.last_stamp = datetime(1979, 7, 7)
silent_bad_job.state = 'error'
silent_bad_job.warn_after = None
still_late_job = Job.create(session, 'foo', 'bar', 'baz')
still_late_job.last_seen = still_late_job.last_stamp = datetime(1979, 7, 7)
still_late_job.state = 'error'
still_late_job.warn_after = timedelta(seconds=1)
session.flush()
# FIXME - This needs real tests!
with patch('proccer.notifications.smtplib'):
main()
|
mit
| -5,977,464,837,307,305,000
| 33.333333
| 79
| 0.683495
| false
| 3.130699
| false
| false
| false
|
houqp/floyd-cli
|
floyd/cli/experiment.py
|
1
|
11976
|
import click
from tabulate import tabulate
from time import sleep
import webbrowser
import sys
from shutil import copyfile
import os
import floyd
from floyd.cli.utils import (
get_module_task_instance_id,
normalize_job_name,
get_namespace_from_name
)
from floyd.client.experiment import ExperimentClient
from floyd.client.module import ModuleClient
from floyd.client.project import ProjectClient
from floyd.client.resource import ResourceClient
from floyd.client.task_instance import TaskInstanceClient
from floyd.exceptions import FloydException
from floyd.manager.experiment_config import ExperimentConfigManager
from floyd.manager.floyd_ignore import FloydIgnoreManager
from floyd.model.experiment_config import ExperimentConfig
from floyd.log import logger as floyd_logger
from floyd.cli.utils import read_yaml_config
# Log output which defines the exit status of the job
SUCCESS_OUTPUT = "[success] Finished execution"
FAILURE_OUTPUT = "[failed] Task execution failed"
SHUTDOWN_OUTPUT = "[shutdown] Task execution cancelled"
TIMEOUT_OUTPUT = "[timeout] Task execution cancelled"
TERMINATION_OUTPUT_LIST = [SUCCESS_OUTPUT,
FAILURE_OUTPUT,
SHUTDOWN_OUTPUT,
TIMEOUT_OUTPUT]
@click.command()
@click.argument('project_name', nargs=1)
def init(project_name):
"""
Initialize new project at the current path.
After this you can run other FloydHub commands like status and run.
"""
project_obj = ProjectClient().get_by_name(project_name)
if not project_obj:
namespace, name = get_namespace_from_name(project_name)
create_project_base_url = "{}/projects/create".format(floyd.floyd_web_host)
create_project_url = "{}?name={}&namespace={}".format(create_project_base_url, name, namespace)
floyd_logger.info(('Project name does not yet exist on floydhub.com. '
'Create your new project on floydhub.com:\n\t%s'),
create_project_base_url)
webbrowser.open(create_project_url)
name = click.prompt('Press ENTER to use project name "%s" or enter a different name' % project_name, default=project_name, show_default=False)
project_name = name.strip() or project_name
project_obj = ProjectClient().get_by_name(project_name)
if not project_obj:
raise FloydException('Project "%s" does not exist on floydhub.com. Ensure it exists before continuing.' % project_name)
namespace, name = get_namespace_from_name(project_name)
experiment_config = ExperimentConfig(name=name,
namespace=namespace,
family_id=project_obj.id)
ExperimentConfigManager.set_config(experiment_config)
FloydIgnoreManager.init()
yaml_config = read_yaml_config()
if not yaml_config:
copyfile(os.path.join(os.path.dirname(__file__), 'default_floyd.yml'), 'floyd.yml')
floyd_logger.info("Project \"%s\" initialized in current directory", project_name)
@click.command()
@click.argument('id', required=False, nargs=1)
def status(id):
"""
View status of all jobs in a project.
The command also accepts a specific job name.
"""
if id:
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
print_experiments([experiment])
else:
experiments = ExperimentClient().get_all()
print_experiments(experiments)
def print_experiments(experiments):
"""
    Prints job details (name, created, status, duration, instance, description and metrics) in a table
"""
headers = ["JOB NAME", "CREATED", "STATUS", "DURATION(s)", "INSTANCE", "DESCRIPTION", "METRICS"]
expt_list = []
for experiment in experiments:
expt_list.append([normalize_job_name(experiment.name),
experiment.created_pretty, experiment.state,
experiment.duration_rounded, experiment.instance_type_trimmed,
experiment.description, format_metrics(experiment.latest_metrics)])
floyd_logger.info(tabulate(expt_list, headers=headers))
def format_metrics(latest_metrics):
return ', '.join(
["%s=%s" % (k, latest_metrics[k]) for k in sorted(latest_metrics.keys())]
) if latest_metrics else ''
@click.command()
@click.argument('id', nargs=1)
@click.option('--path', '-p',
help='Download files in a specific path from a job')
def clone(id, path):
"""
- Download all files from a job
Eg: alice/projects/mnist/1/
Note: This will download the files that were originally uploaded at
the start of the job.
- Download files in a specific path from a job
Specify the path to a directory and download all its files and subdirectories.
Eg: --path models/checkpoint1
"""
try:
experiment = ExperimentClient().get(normalize_job_name(id, use_config=False))
except FloydException:
experiment = ExperimentClient().get(id)
task_instance_id = get_module_task_instance_id(experiment.task_instances)
task_instance = TaskInstanceClient().get(task_instance_id) if task_instance_id else None
if not task_instance:
sys.exit("Cannot clone this version of the job. Try a different version.")
module = ModuleClient().get(task_instance.module_id) if task_instance else None
if path:
# Download a directory from Code
code_url = "{}/api/v1/download/artifacts/code/{}?is_dir=true&path={}".format(floyd.floyd_host,
experiment.id,
path)
else:
# Download the full Code
code_url = "{}/api/v1/resources/{}?content=true&download=true".format(floyd.floyd_host,
module.resource_id)
ExperimentClient().download_tar(url=code_url,
untar=True,
delete_after_untar=True)
@click.command()
@click.argument('job_name_or_id', nargs=1, required=False)
def info(job_name_or_id):
"""
View detailed information of a job.
"""
try:
experiment = ExperimentClient().get(normalize_job_name(job_name_or_id))
except FloydException:
experiment = ExperimentClient().get(job_name_or_id)
task_instance_id = get_module_task_instance_id(experiment.task_instances)
task_instance = TaskInstanceClient().get(task_instance_id) if task_instance_id else None
normalized_job_name = normalize_job_name(experiment.name)
table = [["Job name", normalized_job_name],
["Created", experiment.created_pretty],
["Status", experiment.state], ["Duration(s)", experiment.duration_rounded],
["Instance", experiment.instance_type_trimmed],
["Description", experiment.description],
["Metrics", format_metrics(experiment.latest_metrics)]]
if task_instance and task_instance.mode in ['jupyter', 'serving']:
table.append(["Mode", task_instance.mode])
table.append(["Url", experiment.service_url])
if experiment.tensorboard_url:
table.append(["TensorBoard", experiment.tensorboard_url])
floyd_logger.info(tabulate(table))
def get_log_id(job_id):
log_msg_printed = False
while True:
try:
experiment = ExperimentClient().get(normalize_job_name(job_id))
except FloydException:
experiment = ExperimentClient().get(job_id)
instance_log_id = experiment.instance_log_id
if instance_log_id:
break
elif not log_msg_printed:
floyd_logger.info("Waiting for logs ...\n")
log_msg_printed = True
sleep(1)
return instance_log_id
def follow_logs(instance_log_id, sleep_duration=1):
"""
Follow the logs until Job termination.
"""
cur_idx = 0
job_terminated = False
while not job_terminated:
# Get the logs in a loop and log the new lines
log_file_contents = ResourceClient().get_content(instance_log_id)
print_output = log_file_contents[cur_idx:]
# Get the status of the Job from the current log line
job_terminated = any(terminal_output in print_output for terminal_output in TERMINATION_OUTPUT_LIST)
cur_idx += len(print_output)
sys.stdout.write(print_output)
sleep(sleep_duration)
@click.command()
@click.option('-u', '--url', is_flag=True, default=False, help='Only print url for accessing logs')
@click.option('-f', '--follow', is_flag=True, default=False, help='Keep streaming the logs in real time')
@click.argument('id', nargs=1, required=False)
def logs(id, url, follow, sleep_duration=1):
"""
View the logs of a job.
To follow along a job in real time, use the --follow flag
"""
instance_log_id = get_log_id(id)
if url:
log_url = "{}/api/v1/resources/{}?content=true".format(
floyd.floyd_host, instance_log_id)
floyd_logger.info(log_url)
return
if follow:
floyd_logger.info("Launching job ...")
follow_logs(instance_log_id, sleep_duration)
else:
log_file_contents = ResourceClient().get_content(instance_log_id)
if len(log_file_contents.strip()):
floyd_logger.info(log_file_contents.rstrip())
else:
floyd_logger.info("Launching job now. Try after a few seconds.")
@click.command()
@click.option('-u', '--url', is_flag=True, default=False, help='Only print url for accessing logs')
@click.argument('id', nargs=1, required=False)
def output(id, url):
"""
View the files from a job.
"""
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
output_dir_url = "%s/%s/files" % (floyd.floyd_web_host, experiment.name)
if url:
floyd_logger.info(output_dir_url)
else:
floyd_logger.info("Opening output path in your browser ...")
webbrowser.open(output_dir_url)
@click.command()
@click.argument('id', nargs=1, required=False)
def stop(id):
"""
Stop a running job.
"""
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
if experiment.state not in ["queued", "queue_scheduled", "running"]:
floyd_logger.info("Job in {} state cannot be stopped".format(experiment.state))
sys.exit(1)
if not ExperimentClient().stop(experiment.id):
floyd_logger.error("Failed to stop job")
sys.exit(1)
floyd_logger.info("Experiment shutdown request submitted. Check status to confirm shutdown")
@click.command()
@click.argument('names', nargs=-1)
@click.option('-y', '--yes', is_flag=True, default=False, help='Skip confirmation')
def delete(names, yes):
"""
Delete a training job.
"""
failures = False
for name in names:
try:
experiment = ExperimentClient().get(normalize_job_name(name))
except FloydException:
experiment = ExperimentClient().get(name)
if not experiment:
failures = True
continue
if not yes and not click.confirm("Delete Job: {}?".format(experiment.name),
abort=False,
default=False):
floyd_logger.info("Job {}: Skipped.".format(experiment.name))
continue
if not ExperimentClient().delete(experiment.id):
failures = True
else:
floyd_logger.info("Job %s Deleted", experiment.name)
if failures:
sys.exit(1)
|
apache-2.0
| -4,036,297,198,941,373,000
| 35.181269
| 150
| 0.632933
| false
| 3.904793
| true
| false
| false
|
arthurdejong/python-stdnum
|
stdnum/eu/vat.py
|
1
|
5505
|
# vat.py - functions for handling European VAT numbers
# coding: utf-8
#
# Copyright (C) 2012-2021 Arthur de Jong
# Copyright (C) 2015 Lionel Elie Mamane
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""VAT (European Union VAT number).
The European Union VAT number consists of a 2 letter country code (ISO
3166-1, except Greece which uses EL) followed by a number that is
allocated per country.
The exact format of the numbers varies per country and a country-specific
check is performed on the number using the VAT module that is relevant for
that country.
>>> compact('ATU 57194903')
'ATU57194903'
>>> validate('BE697449992')
'BE0697449992'
>>> validate('FR 61 954 506 077')
'FR61954506077'
>>> guess_country('00449544B01')
['nl']
"""
from stdnum.exceptions import *
from stdnum.util import clean, get_cc_module, get_soap_client
MEMBER_STATES = set([
'at', 'be', 'bg', 'cy', 'cz', 'de', 'dk', 'ee', 'es', 'fi', 'fr', 'gr',
'hr', 'hu', 'ie', 'it', 'lt', 'lu', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro',
'se', 'si', 'sk', 'xi',
])
"""The collection of country codes that are queried. Greece is listed with a
country code of gr while for VAT purposes el is used instead. For Northern
Ireland numbers are prefixed with xi of United Kingdom numbers."""
_country_modules = dict()
vies_wsdl = 'https://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl'
"""The WSDL URL of the VAT Information Exchange System (VIES)."""
def _get_cc_module(cc):
"""Get the VAT number module based on the country code."""
# Greece uses a "wrong" country code
cc = cc.lower()
if cc == 'el':
cc = 'gr'
if cc not in MEMBER_STATES:
return
if cc == 'xi':
cc = 'gb'
if cc not in _country_modules:
_country_modules[cc] = get_cc_module(cc, 'vat')
return _country_modules[cc]
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, '').upper().strip()
cc = number[:2]
module = _get_cc_module(cc)
if not module:
raise InvalidComponent()
number = module.compact(number)
if not number.startswith(cc):
number = cc + number
return number
def validate(number):
"""Check if the number is a valid VAT number. This performs the
country-specific check for the number."""
number = clean(number, '').upper().strip()
cc = number[:2]
module = _get_cc_module(cc)
if not module:
raise InvalidComponent()
number = module.validate(number)
if not number.startswith(cc):
number = cc + number
return number
def is_valid(number):
"""Check if the number is a valid VAT number. This performs the
country-specific check for the number."""
try:
return bool(validate(number))
except ValidationError:
return False
def guess_country(number):
"""Guess the country code based on the number. This checks the number
against each of the validation routines and returns the list of countries
for which it is valid. This returns lower case codes and returns gr (not
el) for Greece."""
return [cc
for cc in MEMBER_STATES
if _get_cc_module(cc).is_valid(number)]
def check_vies(number, timeout=30): # pragma: no cover (not part of normal test suite)
"""Query the online European Commission VAT Information Exchange System
(VIES) for validity of the provided number. Note that the service has
usage limitations (see the VIES website for details). The timeout is in
seconds. This returns a dict-like object."""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the VIES website
number = compact(number)
client = get_soap_client(vies_wsdl, timeout)
return client.checkVat(number[:2], number[2:])
def check_vies_approx(number, requester, timeout=30): # pragma: no cover
"""Query the online European Commission VAT Information Exchange System
(VIES) for validity of the provided number, providing a validity
certificate as proof. You will need to give your own VAT number for this
to work. Note that the service has usage limitations (see the VIES
website for details). The timeout is in seconds. This returns a dict-like
object."""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the VIES website
number = compact(number)
requester = compact(requester)
client = get_soap_client(vies_wsdl, timeout)
return client.checkVatApprox(
countryCode=number[:2], vatNumber=number[2:],
requesterCountryCode=requester[:2], requesterVatNumber=requester[2:])
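# A minimal usage sketch (hedged), reusing the sample numbers from the module
# docstring above; it assumes the full stdnum package is importable.
if __name__ == '__main__':  # pragma: no cover
    print(compact('ATU 57194903'))        # 'ATU57194903'
    print(validate('FR 61 954 506 077'))  # 'FR61954506077'
    print(guess_country('00449544B01'))   # ['nl']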
|
lgpl-2.1
| -3,631,167,909,865,830,000
| 35.946309
| 87
| 0.695186
| false
| 3.767967
| false
| false
| false
|
tinloaf/home-assistant
|
homeassistant/components/homekit_controller/__init__.py
|
1
|
10546
|
"""
Support for Homekit device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/homekit_controller/
"""
import json
import logging
import os
from homeassistant.components.discovery import SERVICE_HOMEKIT
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import call_later
REQUIREMENTS = ['homekit==0.12.0']
DOMAIN = 'homekit_controller'
HOMEKIT_DIR = '.homekit'
# Mapping from Homekit type to component.
HOMEKIT_ACCESSORY_DISPATCH = {
'lightbulb': 'light',
'outlet': 'switch',
'switch': 'switch',
'thermostat': 'climate',
}
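# Example of the lookup performed in accessory_setup() below: a service whose
# short type resolves to 'lightbulb' is loaded on the 'light' platform, while a
# type missing from this mapping yields None and the service is skipped.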
HOMEKIT_IGNORE = [
'BSB002',
'Home Assistant Bridge',
'TRADFRI gateway'
]
KNOWN_ACCESSORIES = "{}-accessories".format(DOMAIN)
KNOWN_DEVICES = "{}-devices".format(DOMAIN)
CONTROLLER = "{}-controller".format(DOMAIN)
_LOGGER = logging.getLogger(__name__)
REQUEST_TIMEOUT = 5 # seconds
RETRY_INTERVAL = 60 # seconds
class HomeKitConnectionError(ConnectionError):
"""Raised when unable to connect to target device."""
def get_serial(accessory):
"""Obtain the serial number of a HomeKit device."""
# pylint: disable=import-error
from homekit.model.services import ServicesTypes
from homekit.model.characteristics import CharacteristicsTypes
for service in accessory['services']:
if ServicesTypes.get_short(service['type']) != \
'accessory-information':
continue
for characteristic in service['characteristics']:
ctype = CharacteristicsTypes.get_short(
characteristic['type'])
if ctype != 'serial-number':
continue
return characteristic['value']
return None
class HKDevice():
"""HomeKit device."""
def __init__(self, hass, host, port, model, hkid, config_num, config):
"""Initialise a generic HomeKit device."""
_LOGGER.info("Setting up Homekit device %s", model)
self.hass = hass
self.controller = hass.data[CONTROLLER]
self.host = host
self.port = port
self.model = model
self.hkid = hkid
self.config_num = config_num
self.config = config
self.configurator = hass.components.configurator
self._connection_warning_logged = False
self.pairing = self.controller.pairings.get(hkid)
if self.pairing is not None:
self.accessory_setup()
else:
self.configure()
def accessory_setup(self):
"""Handle setup of a HomeKit accessory."""
# pylint: disable=import-error
from homekit.model.services import ServicesTypes
self.pairing.pairing_data['AccessoryIP'] = self.host
self.pairing.pairing_data['AccessoryPort'] = self.port
try:
data = self.pairing.list_accessories_and_characteristics()
except HomeKitConnectionError:
call_later(
self.hass, RETRY_INTERVAL, lambda _: self.accessory_setup())
return
for accessory in data:
serial = get_serial(accessory)
if serial in self.hass.data[KNOWN_ACCESSORIES]:
continue
self.hass.data[KNOWN_ACCESSORIES][serial] = self
aid = accessory['aid']
for service in accessory['services']:
service_info = {'serial': serial,
'aid': aid,
'iid': service['iid']}
devtype = ServicesTypes.get_short(service['type'])
_LOGGER.debug("Found %s", devtype)
component = HOMEKIT_ACCESSORY_DISPATCH.get(devtype, None)
if component is not None:
discovery.load_platform(self.hass, component, DOMAIN,
service_info, self.config)
def device_config_callback(self, callback_data):
"""Handle initial pairing."""
import homekit # pylint: disable=import-error
code = callback_data.get('code').strip()
try:
self.controller.perform_pairing(self.hkid, self.hkid, code)
except homekit.UnavailableError:
error_msg = "This accessory is already paired to another device. \
Please reset the accessory and try again."
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
return
except homekit.AuthenticationError:
error_msg = "Incorrect HomeKit code for {}. Please check it and \
try again.".format(self.model)
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
return
except homekit.UnknownError:
error_msg = "Received an unknown error. Please file a bug."
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
raise
self.pairing = self.controller.pairings.get(self.hkid)
if self.pairing is not None:
pairing_file = os.path.join(
self.hass.config.path(),
HOMEKIT_DIR,
'pairing.json'
)
self.controller.save_data(pairing_file)
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.request_done(_configurator)
self.accessory_setup()
else:
error_msg = "Unable to pair, please try again"
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
def configure(self):
"""Obtain the pairing code for a HomeKit device."""
description = "Please enter the HomeKit code for your {}".format(
self.model)
self.hass.data[DOMAIN+self.hkid] = \
self.configurator.request_config(self.model,
self.device_config_callback,
description=description,
submit_caption="submit",
fields=[{'id': 'code',
'name': 'HomeKit code',
'type': 'string'}])
class HomeKitEntity(Entity):
"""Representation of a Home Assistant HomeKit device."""
def __init__(self, accessory, devinfo):
"""Initialise a generic HomeKit device."""
self._name = accessory.model
self._accessory = accessory
self._aid = devinfo['aid']
self._iid = devinfo['iid']
self._address = "homekit-{}-{}".format(devinfo['serial'], self._iid)
self._features = 0
self._chars = {}
def update(self):
"""Obtain a HomeKit device's state."""
try:
pairing = self._accessory.pairing
data = pairing.list_accessories_and_characteristics()
except HomeKitConnectionError:
return
for accessory in data:
if accessory['aid'] != self._aid:
continue
for service in accessory['services']:
if service['iid'] != self._iid:
continue
self.update_characteristics(service['characteristics'])
break
@property
def unique_id(self):
"""Return the ID of this device."""
return self._address
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._accessory.pairing is not None
def update_characteristics(self, characteristics):
"""Synchronise a HomeKit device state with Home Assistant."""
raise NotImplementedError
def put_characteristics(self, characteristics):
"""Control a HomeKit device state from Home Assistant."""
chars = []
for row in characteristics:
chars.append((
row['aid'],
row['iid'],
row['value'],
))
self._accessory.pairing.put_characteristics(chars)
def setup(hass, config):
"""Set up for Homekit devices."""
# pylint: disable=import-error
import homekit
from homekit.controller import Pairing
hass.data[CONTROLLER] = controller = homekit.Controller()
data_dir = os.path.join(hass.config.path(), HOMEKIT_DIR)
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
pairing_file = os.path.join(data_dir, 'pairings.json')
if os.path.exists(pairing_file):
controller.load_data(pairing_file)
# Migrate any existing pairings to the new internal homekit_python format
for device in os.listdir(data_dir):
if not device.startswith('hk-'):
continue
alias = device[3:]
if alias in controller.pairings:
continue
with open(os.path.join(data_dir, device)) as pairing_data_fp:
pairing_data = json.load(pairing_data_fp)
controller.pairings[alias] = Pairing(pairing_data)
controller.save_data(pairing_file)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for Homekit discovery events."""
        # Read the model, HomeKit id and config number from the discovery properties.
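        # Illustrative shape of discovery_info as consumed here (all values are
        # placeholders, not taken from a real device):
        #   {'host': '192.168.1.20', 'port': 51826,
        #    'properties': {'md': 'SomeModel', 'id': 'AA:BB:CC:DD:EE:FF', 'c#': '2'}}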
host = discovery_info['host']
port = discovery_info['port']
model = discovery_info['properties']['md']
hkid = discovery_info['properties']['id']
config_num = int(discovery_info['properties']['c#'])
if model in HOMEKIT_IGNORE:
return
# Only register a device once, but rescan if the config has changed
if hkid in hass.data[KNOWN_DEVICES]:
device = hass.data[KNOWN_DEVICES][hkid]
if config_num > device.config_num and \
device.pairing_info is not None:
device.accessory_setup()
return
_LOGGER.debug('Discovered unique device %s', hkid)
device = HKDevice(hass, host, port, model, hkid, config_num, config)
hass.data[KNOWN_DEVICES][hkid] = device
hass.data[KNOWN_ACCESSORIES] = {}
hass.data[KNOWN_DEVICES] = {}
discovery.listen(hass, SERVICE_HOMEKIT, discovery_dispatch)
return True
|
apache-2.0
| 554,708,964,111,326,800
| 34.870748
| 78
| 0.591125
| false
| 4.22516
| true
| false
| false
|
williamroot/opps
|
opps/channels/admin.py
|
1
|
3732
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from mptt.admin import MPTTModelAdmin
from .models import Channel
from .forms import ChannelAdminForm
from opps.core.admin import PublishableAdmin
from opps.core.admin import apply_opps_rules
from opps.core.permissions.admin import AdminViewPermission
from opps.core.utils import get_template_path
import json
@apply_opps_rules('channels')
class ChannelAdmin(PublishableAdmin, MPTTModelAdmin, AdminViewPermission):
prepopulated_fields = {"slug": ("name",)}
list_display = ['name', 'show_channel_path', 'get_parent', 'site',
'date_available', 'homepage', 'order', 'show_in_menu',
'published']
list_filter = ['date_available', 'published', 'site', 'homepage', 'parent',
'show_in_menu']
search_fields = ['name', 'slug', 'long_slug', 'description']
exclude = ('user', 'long_slug')
raw_id_fields = ['parent', 'main_image']
form = ChannelAdminForm
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'parent', 'name', 'slug', 'layout', 'hat',
'description', 'main_image',
'order', ('show_in_menu', 'menu_url_target'),
'include_in_main_rss', 'homepage', 'group',
'paginate_by')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
def get_parent(self, obj):
if obj.parent_id:
long_slug, slug = obj.long_slug.rsplit("/", 1)
return long_slug
get_parent.admin_order_field = "parent"
get_parent.short_description = "Parent"
def show_channel_path(self, obj):
return unicode(obj)
show_channel_path.short_description = _(u'Channel Path')
def save_model(self, request, obj, form, change):
long_slug = u"{0}".format(obj.slug)
if obj.parent:
long_slug = u"{0}/{1}".format(obj.parent.slug, obj.slug)
obj.long_slug = long_slug
super(ChannelAdmin, self).save_model(request, obj, form, change)
def get_form(self, request, obj=None, **kwargs):
form = super(ChannelAdmin, self).get_form(request, obj, **kwargs)
channel_json = []
def _get_template_path(_path):
template = get_template_path(_path)
with open(template) as f:
_jsonData = f.read().replace('\n', '')
return json.loads(_jsonData)
def _get_json_channel(_obj):
return _get_template_path(
u'containers/{0}/channel.json'.format(_obj.long_slug))
def _get_json_channel_recursivelly(_obj):
channel_json = []
try:
channel_json = _get_json_channel(_obj)
except:
_is_root = _obj.is_root_node()
if not _is_root:
channel_json = _get_json_channel_recursivelly(_obj.parent)
elif _is_root:
try:
channel_json = _get_template_path(
u'containers/channel.json')
except:
pass
finally:
return channel_json
channel_json = _get_json_channel_recursivelly(obj)
if u'layout' in channel_json:
layout_list = ['default'] + [l for l in channel_json['layout']]
layout_choices = (
(n, n.title()) for n in layout_list)
form.base_fields['layout'].choices = layout_choices
return form
admin.site.register(Channel, ChannelAdmin)
|
mit
| -3,664,716,400,936,687,000
| 34.542857
| 79
| 0.556002
| false
| 3.871369
| false
| false
| false
|
ptrsxu/snippetpy
|
ds/ringbuffer.py
|
1
|
1268
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""from python cookbook 2nd edition."""
class RingBuffer(object):
""" a ringbuffer not filled """
def __init__(self, size_max):
self.max = size_max
self.data = []
class __Full(object):
""" a ringbuffer filled """
def append(self, x):
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def tolist(self):
""" return the list with real order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
""" add an element at the end of the buffer """
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
            # change the state of the instance to "FULL" forever
self.__class__ = self.__Full
def tolist(self):
""" return the list with real order """
return self.data
def main():
x = RingBuffer(5)
x.append(1)
x.append(2)
x.append(3)
x.append(4)
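    # buffer not yet full here: tolist() -> [1, 2, 3, 4]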
print x.__class__, x.tolist()
x.append(5)
x.append(6)
x.append(7)
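    # buffer filled up at 5; 6 and 7 overwrote 1 and 2: tolist() -> [3, 4, 5, 6, 7]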
print x.__class__, x.tolist()
x.append(8)
x.append(9)
x.append(10)
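    # 8, 9 and 10 overwrote 3, 4 and 5: tolist() -> [6, 7, 8, 9, 10]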
print x.__class__, x.tolist()
if __name__ == "__main__":
main()
|
mit
| 683,121,483,345,865,500
| 22.924528
| 63
| 0.518927
| false
| 3.390374
| false
| false
| false
|
TheProjecter/jxtl
|
test/test.py
|
1
|
1738
|
#
# $Id$
#
# Description
# Runs the same tests, but does so using the Python language bindings.
# The Python bindings need to be built and installed to run this.
#
# Copyright 2010 Dan Rinehimer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import libjxtl;
import glob;
import os.path;
import filecmp;
def format_case( value, format, context ):
if ( format == "upper" ):
return value.upper();
elif ( format == "lower" ):
return value.lower();
else:
return value;
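# e.g. format_case('Pale Ale', 'upper', None) returns 'PALE ALE'; any other
# format name passes the value through unchanged.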
def compare( file1, file2 ):
if ( filecmp.cmp( file1, file2 ) == False ):
print "Failed test in " + os.path.dirname( file1 );
else:
os.remove( file2 );
inputs = glob.glob( "./t*/input" );
beers_xml = libjxtl.xml_to_dict( "t.xml" );
beers_json = libjxtl.json_to_dict( "t.json" );
t = libjxtl.Template();
for input in inputs:
dir = os.path.dirname( input );
t.load( input );
t.register_format( "upper", format_case );
t.register_format( "lower", format_case );
t.expand_to_file( dir + "/test.output", beers_xml );
compare( dir + "/output", dir + "/test.output" );
t.expand_to_file( dir + "/test.output", beers_json );
compare( dir + "/output", dir + "/test.output" );
|
apache-2.0
| -1,674,219,676,627,167,200
| 30.6
| 75
| 0.659379
| false
| 3.361702
| true
| false
| false
|
FourthLion/pydatasentry
|
pydatasentry/capture.py
|
1
|
1585
|
#!/usr/bin/env python
import uuid
import inspect
import json
import os, sys
import copy
from .config import get_config
from .helpers import dumper, merge
from .process import summarize_run
def capture_input(args, kwargs, metadata):
"""
Capture the function parameters for the functions that have been instrumented
"""
formula = kwargs.get('formula', None)
data = kwargs.get('data', None)
sentryopts = kwargs.pop('sentryopts', {})
# Inspect and get the source files...
curframe = inspect.currentframe()
calframes = inspect.getouterframes(curframe, 3)
filename = os.path.realpath(calframes[2][1])
lineno = calframes[2][2]
snippet = calframes[2][4]
uid = str(uuid.uuid1())
params = {
'uuid': uid,
'source': {
'filename': filename,
'lineno': lineno,
'snippet': snippet
},
'model': {
'library': {
'module': metadata['modname'],
'function': metadata['funcname']
},
'parameters': {
'formula': formula,
'data': data
},
}
#'other parameters': {
# 'args': args,
# 'kwargs': kwargs
#},
}
run = get_config()
merge(run, params)
merge(run, sentryopts)
return run
def capture_output(run, result):
"""
Capture the results of the instrumented function
"""
run['model']['result'] = result
summarize_run(run)
return
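# A minimal sketch (hedged) of how the two hooks above are meant to bracket an
# instrumented call; the metadata values and `original_fit` are placeholders:
#
#   def patched_fit(*args, **kwargs):
#       run = capture_input(args, kwargs,
#                           {'modname': 'some.module', 'funcname': 'fit'})
#       result = original_fit(*args, **kwargs)
#       capture_output(run, result)
#       return result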
|
mit
| 5,243,514,757,579,922,000
| 23.384615
| 81
| 0.536278
| false
| 4.106218
| false
| false
| false
|
mrakitin/sirepo
|
sirepo/srdb.py
|
1
|
1714
|
# -*- coding: utf-8 -*-
u"""db configuration
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkconfig
from pykern import pkinspect
from pykern import pkio
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
import os.path
import sys
#: Relative to current directory only in dev mode
_DEFAULT_ROOT = 'run'
#: Configured root either by server_set_root or cfg
_root = None
def root():
return _root or _init_root()
@pkconfig.parse_none
def _cfg_root(value):
"""Config value or root package's parent or cwd with `_DEFAULT_ROOT`"""
return value
def _init_root():
global cfg, _root
cfg = pkconfig.init(
root=(None, _cfg_root, 'where database resides'),
)
v = cfg.root
if v:
assert os.path.isabs(v), \
'{}: SIREPO_SRDB_ROOT must be absolute'.format(v)
assert os.path.isdir(v), \
'{}: SIREPO_SRDB_ROOT must be a directory and exist'.format(v)
v = pkio.py_path(v)
else:
assert pkconfig.channel_in('dev'), \
'SIREPO_SRDB_ROOT must be configured except in DEV'
fn = sys.modules[pkinspect.root_package(_init_root)].__file__
root = pkio.py_path(pkio.py_path(pkio.py_path(fn).dirname).dirname)
# Check to see if we are in our dev directory. This is a hack,
# but should be reliable.
if not root.join('requirements.txt').check():
# Don't run from an install directory
root = pkio.py_path('.')
v = pkio.mkdir_parent(root.join(_DEFAULT_ROOT))
_root = v
return v
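# A minimal usage sketch (hedged): callers only need root(), which returns the
# configured SIREPO_SRDB_ROOT or the dev-mode default.
#
#   from sirepo import srdb
#   db_dir = srdb.root()   # py.path.local object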
|
apache-2.0
| 8,552,786,995,130,998,000
| 29.070175
| 75
| 0.63769
| false
| 3.380671
| true
| false
| false
|
StichtingOpenGeo/transitpubsub
|
src/zmq_network.py
|
1
|
1444
|
from consts import ZMQ_SERVER_NETWORK, ZMQ_PUBSUB_KV17
from network import network
from helpers import serialize
import zmq
import sys
# Initialize the cached network
sys.stderr.write('Caching networkgraph...')
net = network()
sys.stderr.write('Done!\n')
# Initialize a zeromq context
context = zmq.Context()
# Set up a channel to receive network requests
sys.stderr.write('Setting up a ZeroMQ REP: %s\n' % (ZMQ_SERVER_NETWORK))
client = context.socket(zmq.REP)
client.bind(ZMQ_SERVER_NETWORK)
# Set up a channel to receive KV17 requests
sys.stderr.write('Setting up a ZeroMQ SUB: %s\n' % (ZMQ_PUBSUB_KV17))
subscribe_kv17 = context.socket(zmq.SUB)
subscribe_kv17.connect(ZMQ_PUBSUB_KV17)
subscribe_kv17.setsockopt(zmq.SUBSCRIBE, '')
# Set up a poller
poller = zmq.Poller()
poller.register(client, zmq.POLLIN)
poller.register(subscribe_kv17, zmq.POLLIN)
sys.stderr.write('Ready.\n')
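# Request protocol handled below (comma-separated fields on the REP socket):
#   'j,<journeypatterncode>'            -> serialized journey pattern lookup
#   'p,<arg1>,...,<arg6>'               -> serialized result of net.passed()
#   anything else                       -> empty reply
#
# A minimal client sketch (hedged; assumes consts is importable and the bind
# address is also usable as a connect address):
#   import zmq
#   from consts import ZMQ_SERVER_NETWORK
#   req = zmq.Context().socket(zmq.REQ)
#   req.connect(ZMQ_SERVER_NETWORK)
#   req.send('j,<journeypatterncode>')
#   reply = req.recv()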
while True:
socks = dict(poller.poll())
if socks.get(client) == zmq.POLLIN:
arguments = client.recv().split(',')
if arguments[0] == 'j' and len(arguments) == 2:
client.send(serialize(net.journeypatterncode(arguments[1])))
elif arguments[0] == 'p' and len(arguments) == 7:
client.send(serialize(net.passed(arguments[1], arguments[2], arguments[3], arguments[4], arguments[5], arguments[6])))
else:
client.send('')
elif socks.get(subscribe_kv17) == zmq.POLLIN:
pass
|
agpl-3.0
| 4,215,510,292,400,556,500
| 30.391304
| 130
| 0.693906
| false
| 3.187638
| false
| false
| false
|
maxalbert/colormap-selector
|
mapping_3d_to_2d_test.py
|
1
|
1798
|
import numpy as np
from cross_section import Plane
from mapping_3d_to_2d import *
def test_initialise_mapping_3d_to_2d_simple():
"""
Check that for a plane orthogonal to the x-axis the transformation
simply drops the constant x-coordinate.
"""
plane1 = Plane([50, 0, 0], n=[1, 0, 0])
f1 = Mapping3Dto2D(plane1)
# Check the 3d -> 2d transformation
assert np.allclose(f1.apply([50, -1, 4]), [-1, 4])
assert np.allclose(f1.apply([50, 3, 7]), [3, 7])
# Check the 2d -> 3d transformation
assert np.allclose(f1.apply_inv([-1, 4]), [50, -1, 4])
assert np.allclose(f1.apply_inv([3, 7]), [50, 3, 7])
assert f1.apply_inv([-1, 4]).ndim == 1
assert f1.apply_inv([[-1, 4]]).ndim == 2
assert np.allclose(f1.apply_inv([[-1, 4], [3, 7]]), [[50, -1, 4], [50, 3, 7]])
# Regression test: check that applying the transformation does not
# change the shape/dimension of the input array.
pt1 = np.array([2., 6., 4.])
pt2 = np.array([[2., 6., 4.]])
_ = f1.apply(pt1)
_ = f1.apply(pt2)
assert pt1.shape == (3,)
assert pt2.shape == (1, 3)
plane2 = Plane([0, 30, 0], n=[0, 1, 0])
f2 = Mapping3Dto2D(plane2)
# Check the 3d -> 2d transformation
assert np.allclose(f2.apply([-1, 30, 4]), [-1, 4])
assert np.allclose(f2.apply([3, 30, 7]), [3, 7])
# Check the 2d -> 3d transformation
assert np.allclose(f2.apply_inv([-1, 4]), [-1, 30, 4])
assert np.allclose(f2.apply_inv([3, 7]), [3, 30, 7])
# Regression test: check that applying the inverse transformation
# does not change the shape/dimension of the input array.
pt1 = np.array([2., 6.])
pt2 = np.array([[2., 6.]])
_ = f1.apply_inv(pt1)
_ = f1.apply_inv(pt2)
assert pt1.shape == (2,)
assert pt2.shape == (1, 2)
|
mit
| 6,102,046,278,074,886,000
| 33.576923
| 82
| 0.585095
| false
| 2.840442
| false
| false
| false
|
bram85/topydo
|
test/test_view.py
|
1
|
1542
|
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from topydo.lib import Filter
from topydo.lib.Sorter import Sorter
from topydo.lib.TodoFile import TodoFile
from topydo.lib.TodoList import TodoList
from .facilities import load_file, print_view, todolist_to_string
from .topydo_testcase import TopydoTest
class ViewTest(TopydoTest):
def test_view(self):
""" Check filters and printer for views. """
todofile = TodoFile('test/data/FilterTest1.txt')
ref = load_file('test/data/ViewTest1-result.txt')
todolist = TodoList(todofile.read())
sorter = Sorter('text')
todofilter = Filter.GrepFilter('+Project')
view = todolist.view(sorter, [todofilter])
self.assertEqual(print_view(view), todolist_to_string(ref))
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| 3,500,970,238,699,097,000
| 35.714286
| 71
| 0.723735
| false
| 3.552995
| true
| false
| false
|
thoreg/suds
|
suds/client.py
|
1
|
25571
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
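# A minimal usage sketch (hedged; the WSDL URL and method name are placeholders):
#
#   from suds.client import Client
#   client = Client('http://example.com/service?wsdl')
#   print(client)                              # lists services, ports and methods
#   result = client.service.SomeMethod('arg')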
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception('No services defined')
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound('at [%d]' % name)
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound(name)
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception('No ports defined: %s' % self.__qn)
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound(qn)
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound(qn)
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound(qn)
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
@type I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug("message for '%s' created: %s", self.method.name, timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug("method '%s' invoked: %s", self.method.name, timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202, 204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = {'Content-Type': 'text/xml; charset=utf-8', 'SOAPAction': action}
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
|
lgpl-3.0
| 7,985,433,385,657,277,000
| 31.95232
| 82
| 0.573032
| false
| 4.135695
| false
| false
| false
|
liamcurry/py3kwarn
|
py3kwarn2to3/pgen2/pgen.py
|
1
|
13781
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from __future__ import print_function
from . import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
pass
class ParserGenerator(object):
def __init__(self, filename, stream=None):
close_stream = None
if stream is None:
stream = open(filename)
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.gettoken() # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
self.first = {} # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self):
c = PgenGrammar()
names = sorted(self.dfas.keys())
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
for name in names:
i = 256 + len(c.symbol2number)
c.symbol2number[name] = i
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
states = []
for state in dfa:
arcs = []
for label, next in state.arcs.items():
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
states.append(arcs)
c.states.append(states)
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
c.start = c.symbol2number[self.startsymbol]
return c
def make_first(self, c, name):
rawfirst = self.first[name]
first = {}
for label in rawfirst:
ilabel = self.make_label(c, label)
##assert ilabel not in first # XXX failed on <> ... !=
first[ilabel] = 1
return first
def make_label(self, c, label):
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
# Either a symbol name or a named token
if label in c.symbol2number:
# A symbol name (a non-terminal)
if label in c.symbol2label:
return c.symbol2label[label]
else:
c.labels.append((c.symbol2number[label], None))
c.symbol2label[label] = ilabel
return ilabel
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
assert isinstance(itoken, int), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
value = eval(label)
if value[0].isalpha():
# A keyword
if value in c.keywords:
return c.keywords[value]
else:
c.labels.append((token.NAME, value))
c.keywords[value] = ilabel
return ilabel
else:
# An operator (any non-numeric token)
itoken = grammar.opmap[value] # Fails if unknown token
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
def addfirstsets(self):
names = sorted(self.dfas.keys())
for name in names:
if name not in self.first:
self.calcfirst(name)
#print name, self.first[name].keys()
def calcfirst(self, name):
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, next in state.arcs.items():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
else:
self.calcfirst(label)
fset = self.first[label]
totalset.update(fset)
overlapcheck[label] = fset
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
for label, itsfirst in overlapcheck.items():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
" first sets of %s as well as %s" %
(name, symbol, label, inverse[symbol]))
inverse[symbol] = label
self.first[name] = totalset
def parse(self):
dfas = {}
startsymbol = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
while self.type != token.ENDMARKER:
while self.type == token.NEWLINE:
self.gettoken()
# RULE: NAME ':' RHS NEWLINE
name = self.expect(token.NAME)
self.expect(token.OP, ":")
a, z = self.parse_rhs()
self.expect(token.NEWLINE)
#self.dump_nfa(name, a, z)
dfa = self.make_dfa(a, z)
#self.dump_dfa(name, dfa)
oldlen = len(dfa)
self.simplify_dfa(dfa)
newlen = len(dfa)
dfas[name] = dfa
#print name, oldlen, newlen
if startsymbol is None:
startsymbol = name
return dfas, startsymbol
def make_dfa(self, start, finish):
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
# values.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state):
base = {}
addclosure(state, base)
return base
def addclosure(state, base):
assert isinstance(state, NFAState)
if state in base:
return
base[state] = 1
for label, next in state.arcs:
if label is None:
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
arcs = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in arcs.items():
for st in states:
if st.nfaset == nfaset:
break
else:
st = DFAState(nfaset, finish)
states.append(st)
state.addarc(st, label)
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
else:
j = len(todo)
todo.append(next)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
for label, next in state.arcs.items():
print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
# unify them, until things stop changing.
# dfa is a list of DFAState instances
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfa):
for j in range(i+1, len(dfa)):
state_j = dfa[j]
if state_i == state_j:
#print " unify", i, j
del dfa[j]
for state in dfa:
state.unifystate(state_j, state_i)
changes = True
break
def parse_rhs(self):
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
return a, z
else:
aa = NFAState()
zz = NFAState()
aa.addarc(a)
z.addarc(zz)
while self.value == "|":
self.gettoken()
a, z = self.parse_alt()
aa.addarc(a)
z.addarc(zz)
return aa, zz
def parse_alt(self):
# ALT: ITEM+
a, b = self.parse_item()
while (self.value in ("(", "[") or
self.type in (token.NAME, token.STRING)):
c, d = self.parse_item()
b.addarc(c)
b = d
return a, b
def parse_item(self):
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, "]")
a.addarc(z)
return a, z
else:
a, z = self.parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self.gettoken()
z.addarc(a)
if value == "+":
return a, z
else:
return a, a
def parse_atom(self):
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, ")")
return a, z
elif self.type in (token.NAME, token.STRING):
a = NFAState()
z = NFAState()
a.addarc(z, self.value)
self.gettoken()
return a, z
else:
self.raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def expect(self, type, value=None):
if self.type != type or (value is not None and self.value != value):
self.raise_error("expected %s/%s, got %s/%s",
type, value, self.type, self.value)
value = self.value
self.gettoken()
return value
def gettoken(self):
tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
class NFAState(object):
def __init__(self):
self.arcs = [] # list of (label, NFAState) pairs
def addarc(self, next, label=None):
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
self.arcs = {} # map from label to DFAState
def addarc(self, next, label):
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
self.arcs[label] = next
def unifystate(self, old, new):
for label, next in self.arcs.items():
if next is old:
self.arcs[label] = new
def __eq__(self, other):
# Equality test -- ignore the nfaset instance variable
assert isinstance(other, DFAState)
if self.isfinal != other.isfinal:
return False
# Can't just return self.arcs == other.arcs, because that
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in self.arcs.items():
if next is not other.arcs.get(label):
return False
return True
__hash__ = None # For Py3 compatibility.
def generate_grammar(filename="Grammar.txt"):
p = ParserGenerator(filename)
return p.make_grammar()
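if __name__ == "__main__":
    # A minimal sketch (hedged): assumes a pgen-style Grammar.txt file in the
    # current working directory, as used by lib2to3.
    g = generate_grammar("Grammar.txt")
    print("%d nonterminals, start symbol %s"
          % (len(g.dfas), g.number2symbol[g.start]))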
|
mit
| -1,833,032,193,287,119,000
| 34.702073
| 78
| 0.495682
| false
| 4.140925
| false
| false
| false
|
JNU-Include/CNN
|
Test/lab-11-4-mnist_cnn_ensemble2.py
|
1
|
2134
|
# Lab 11 MNIST and Deep learning CNN
import tensorflow as tf
from lib.ensemble.ensemble_core import EnsembleCore
from lib.ensemble.mnist_core import MnistCore
from lib.ensemble.cnn_core import CNNCore
class MyCNN (CNNCore):
def init_network(self):
self.set_placeholder(784, 10, 28, 28)
self.DO = tf.placeholder(tf.float32)
L1 = self.convolution_layer(self.X_2d, 3, 3, 1, 32, 1, 1)
L1 = self.relu(L1)
L1_maxpool = self.max_pool(L1, 2, 2, 2, 2)
L1_maxpool = self.dropout(L1_maxpool)
L2 = self.convolution_layer(L1_maxpool, 3, 3, 32, 64, 1, 1)
L2 = self.relu(L2)
L2_maxpool = self.max_pool(L2, 2, 2, 2, 2)
L2_maxpool = self.dropout(L2_maxpool)
L3 = self.convolution_layer(L2_maxpool, 3, 3, 64, 128, 1, 1)
L3 = self.relu(L3)
L3_maxpool = self.max_pool(L3, 2, 2, 2, 2)
L3_maxpool = self.dropout(L3_maxpool)
# L4 FC 4x4x128 inputs -> 625 outputs
reshaped = tf.reshape(L3_maxpool, [-1, 128 * 4 * 4])
L4 = self.fully_connected_layer(reshaped, 128 * 4 * 4, 625, 'W4')
L4 = self.relu(L4)
L4 = self.dropout(L4)
self.logit = self.fully_connected_layer(L4, 625, 10, 'W5')
self.set_hypothesis(self.logit)
self.set_cost_function()
self.set_optimizer(0.001)
class MyEnsemble (EnsembleCore):
mnist = MnistCore()
def load_db(self):
self.mnist.load_mnist()
def set_networks(self, sess, num_of_network):
self.create_networks(sess, MyCNN, 'network_name', 7)
def get_number_of_segment(self, seg_size):
return self.mnist.get_number_of_segment(seg_size)
def get_next_segment(self, seg_size):
return self.mnist.get_next_segment(seg_size)
def get_test_data(self):
return self.mnist.get_test_x_data(), self.mnist.get_test_y_data()
gildong = MyEnsemble()
gildong.learn_ensemble(7, 15, 100)
gildong.evaluate_all_models()
'''
0 Accuracy: 0.9933
1 Accuracy: 0.9946
2 Accuracy: 0.9934
3 Accuracy: 0.9935
4 Accuracy: 0.9935
5 Accuracy: 0.9949
6 Accuracy: 0.9941
Ensemble accuracy: 0.9952
'''
|
mit
| 3,337,229,261,010,518,000
| 26.358974
| 73
| 0.628397
| false
| 2.7289
| false
| false
| false
|
compas-dev/compas
|
src/compas_rhino/objects/inspectors/meshinspector.py
|
1
|
3026
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from System.Collections.Generic import List
from System.Drawing.Color import FromArgb
from Rhino.Geometry import Point3d
from Rhino.Geometry import Line
from compas_rhino.conduits import BaseConduit
from compas_rhino.ui import Mouse
from compas.geometry import length_vector
from compas.geometry import cross_vectors
from compas.geometry import subtract_vectors
__all__ = ['MeshVertexInspector']
class MeshVertexInspector(BaseConduit):
"""Inspect mesh topology at the vertices.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
tol: float, optional
dotcolor: rgb-tuple, optional
textcolor: rgb-tuple, optional
linecolor: rgb-tuple, optional
"""
def __init__(self, mesh, tol=0.1, dotcolor=None, textcolor=None, linecolor=None, **kwargs):
super(MeshVertexInspector, self).__init__(**kwargs)
self._vertex_xyz = None
dotcolor = dotcolor or (255, 255, 0)
textcolor = textcolor or (0, 0, 0)
linecolor = linecolor or (255, 255, 0)
self.mesh = mesh
self.tol = tol
self.dotcolor = FromArgb(*dotcolor)
self.textcolor = FromArgb(*textcolor)
self.linecolor = FromArgb(*linecolor)
self.mouse = Mouse(self)
self.vertex_nbr = {
vertex: [(vertex, nbr) if mesh.has_edge((vertex, nbr)) else (nbr, vertex) for nbr in mesh.vertex_neighbors(vertex)]
for vertex in mesh.vertices()
}
@property
def vertex_xyz(self):
if not self._vertex_xyz:
self._vertex_xyz = {vertex: self.mesh.vertex_attributes(vertex, 'xyz') for vertex in self.mesh.vertices()}
return self._vertex_xyz
@vertex_xyz.setter
def vertex_xyz(self, vertex_xyz):
self._vertex_xyz = vertex_xyz
def enable(self):
"""Enable the conduit."""
self.mouse.Enabled = True
self.Enabled = True
def disable(self):
"""Disable the conduit."""
self.mouse.Enabled = False
self.Enabled = False
def DrawForeground(self, e):
draw_dot = e.Display.DrawDot
draw_arrows = e.Display.DrawArrows
a = self.mouse.p1
b = self.mouse.p2
ab = subtract_vectors(b, a)
Lab = length_vector(ab)
if not Lab:
return
for index, vertex in enumerate(self.vertex_xyz):
c = self.vertex_xyz[vertex]
D = length_vector(cross_vectors(subtract_vectors(a, c), subtract_vectors(b, c)))
if D / Lab < self.tol:
point = Point3d(*c)
draw_dot(point, str(index), self.dotcolor, self.textcolor)
lines = List[Line](len(self.vertex_nbr[vertex]))
for u, v in self.vertex_nbr[vertex]:
lines.Add(Line(Point3d(* self.vertex_xyz[u]), Point3d(* self.vertex_xyz[v])))
draw_arrows(lines, self.linecolor)
break
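# Hedged usage sketch, not part of the original file: enabling the inspector
# inside Rhino, assuming a compas Mesh instance named `mesh` is available.
#
#     inspector = MeshVertexInspector(mesh, tol=0.1)
#     inspector.enable()    # draw vertex dots/arrows near the mouse ray
#     # ... interact with the model ...
#     inspector.disable()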
|
mit
| 1,955,176,110,724,072,700
| 33
| 127
| 0.618638
| false
| 3.69475
| false
| false
| false
|
mysociety/pombola
|
pombola/core/kenya_import_scripts/import_contacts_from_tuples.py
|
1
|
1646
|
#!/usr/bin/env python
import os
import sys
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from pombola.core import models
from django.contrib.contenttypes.models import ContentType
import mp_contacts
phone_kind = models.ContactKind.objects.get(slug='phone')
email_kind = models.ContactKind.objects.get(slug='email')
for row in mp_contacts.entries:
(name, phone, email) = row
if not (phone or email):
continue
# code needs reworking now that the name structure of the database has changed
matches = models.Person.objects.all().is_politician().name_matches( name )
if matches.count() == 0:
print " no match for '%s', '%s', '%s'" % (name, phone, email)
continue
if matches.count() > 1:
print " several matches for %s" % name
continue
mp = matches[0]
# print "%s -> %s" % ( name, mp.name )
content_type = ContentType.objects.get_for_model(mp)
source = "SUNY Kenya spreadsheet entry for '%s'" % name
if phone:
models.Contact.objects.get_or_create(
content_type=content_type,
object_id=mp.id,
value=phone,
kind=phone_kind,
defaults = {
"source":source,
}
)
if email:
models.Contact.objects.get_or_create(
content_type=content_type,
object_id=mp.id,
value=email,
kind=email_kind,
defaults = {
"source":source,
}
)
|
agpl-3.0
| -652,200,642,074,956,900
| 22.514286
| 82
| 0.565006
| false
| 3.749431
| false
| false
| false
|
linuxscout/tashaphyne
|
tashaphyne/stemming.py
|
1
|
47427
|
# -*- coding: UTF-8 -*-
"""
Arabic Light Stemmer
A class which provides a configurable stemmer
and segmentor for arabic text.
Features:
=========
- Arabic word Light Stemming.
- Root Extraction.
- Word Segmentation
- Word normalization
- Default Arabic Affixes list.
- A customizable Light stemmer: possibility of changing
stemmer options and data.
- Data independent stemmer
@author: Taha Zerrouki <taha_zerrouki at gmail dot com>
@author: Taha Zerrouki
@contact: taha dot zerrouki at gmail dot com
@copyright: Arabtechies, Arabeyes, Taha Zerrouki
@license: GPL
@date:2017/02/15
@version:0.3
"""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
division,
)
import re
import sys
sys.path.append('../support/')
import pyarabic.araby as araby
if __name__ == "__main__":
sys.path.append('../')
import normalize
import stem_const
import affix_const
import roots_const
import verb_stamp_const
import arabicstopwords
else:
from . import normalize
from . import stem_const
from . import affix_const
from . import roots_const
from . import verb_stamp_const
from . import arabicstopwords
class ArabicLightStemmer:
"""
    ArabicLightStemmer: a class which provides a configurable stemmer
and segmentor for arabic text.
Features:
=========
- Arabic word Light Stemming.
- Root Extraction.
- Word Segmentation
- Word normalization
- Default Arabic Affixes list.
    - A customizable Light stemmer: possibility of changing
stemmer options and data.
- Data independent stemmer
@author: Taha Zerrouki <taha_zerrouki at gmail dot com>
@author: Taha Zerrouki
@contact: taha dot zerrouki at gmail dot com
@copyright: Arabtechies, Arabeyes, Taha Zerrouki
@license: GPL
@date:2017/02/15
@version:0.3
"""
def __init__(self):
#load affix information
# pass
self.prefix_letters = stem_const.DEFAULT_PREFIX_LETTERS
self.suffix_letters = stem_const.DEFAULT_SUFFIX_LETTERS
self.infix_letters = stem_const.DEFAULT_INFIX_LETTERS
self.max_prefix_length = stem_const.DEFAULT_MAX_PREFIX
self.max_suffix_length = stem_const.DEFAULT_MAX_SUFFIX
self.min_stem_length = stem_const.DEFAULT_MIN_STEM
self.joker = stem_const.DEFAULT_JOKER
self.prefix_list = stem_const.DEFAULT_PREFIX_LIST
self.suffix_list = stem_const.DEFAULT_SUFFIX_LIST
# root dictionary
self.root_list = roots_const.ROOTS
# lists used to validate affixation
#~ self.valid_affixes_list = []
self.valid_affixes_list = set(list(affix_const.VERB_AFFIX_LIST) + list(affix_const.NOUN_AFFIX_LIST))
self.word = u""
self.unvocalized = u""
self.normalized = u""
self.starword = u""
self.root = u""
self.left = 0
self.right = 0
self.segment_list = []
#token pattern
# letters and harakat
self.token_pat = re.compile(u"[^\w\u064b-\u0652']+", re.UNICODE)
self.prefixes_tree = self._create_prefix_tree(self.prefix_list)
self.suffixes_tree = self._create_suffix_tree(self.suffix_list)
######################################################################
    #{ Attribute Functions
######################################################################
def get_prefix_letters(self, ):
""" return the prefixation letters.
        This constant takes DEFAULT_PREFIX_LETTERS by default.
        @return: the prefix letters.
@rtype: unicode.
"""
return self.prefix_letters
def set_prefix_letters(self, new_prefix_letters):
""" set the prefixation letters.
This constant take DEFAULT_PREFIX_LETTERS by default.
@param new_prefix_letters: letters to be striped from a word,
e.g.new_prefix_letters = u"وف":.
@type new_prefix_letters: unicode.
"""
self.prefix_letters = new_prefix_letters
def get_suffix_letters(self, ):
""" return the suffixation letters.
        This constant takes DEFAULT_SUFFIX_LETTERS by default.
        @return: the suffix letters.
@rtype: unicode.
"""
return self.suffix_letters
def set_suffix_letters(self, new_suffix_letters):
""" set the suffixation letters.
This constant take DEFAULT_SUFFIX_LETTERS by default.
@param new_suffix_letters: letters to be striped from the end of a word,
e.g.new_suffix_letters = u"ةون":.
@type new_suffix_letters: unicode.
"""
self.suffix_letters = new_suffix_letters
def get_infix_letters(self, ):
""" get the inffixation letters.
This constant take DEFAULT_INFIX_LETTERS by default.
@return: infixes letters.
@rtype: unicode.
"""
return self.infix_letters
def set_infix_letters(self, new_infix_letters):
""" set the inffixation letters.
This constant take DEFAULT_INFIX_LETTERS by default.
@param new_infix_letters: letters to be striped from the middle
of a word, e.g.new_infix_letters = u"أوي":.
@type new_infix_letters: unicode.
"""
self.infix_letters = new_infix_letters
def get_joker(self, ):
""" get the joker letter.
This constant take DEFAULT_JOKER by default.
@return: joker letter.
@rtype: unicode.
"""
return self.joker
def set_joker(self, new_joker):
""" set the joker letter.
This constant take DEFAULT_JOKER by default.
@param new_joker: joker letter.
@type new_joker: unicode.
"""
if len(new_joker) > 1:
new_joker = new_joker[0]
self.joker = new_joker
def get_max_prefix_length(self, ):
""" return the constant of max length of the prefix used by the stemmer.
        This constant takes DEFAULT_MAX_PREFIX_LENGTH by default.
        @return: a number.
@rtype: integer.
"""
return self.max_prefix_length
def set_max_prefix_length(self, new_max_prefix_length):
""" Set the constant of max length of the prefix used by the stemmer.
This constant take DEFAULT_MAX_PREFIX_LENGTH by default.
@param new_max_prefix_length: the new max prefix length constant.
@type new_max_prefix_length: integer.
"""
self.max_prefix_length = new_max_prefix_length
def get_max_suffix_length(self, ):
""" return the constant of max length of the suffix used by the stemmer.
        This constant takes DEFAULT_MAX_SUFFIX_LENGTH by default.
        @return: a number.
@rtype: integer.
"""
return self.max_suffix_length
def set_max_suffix_length(self, new_max_suffix_length):
""" Set the constant of max length of the suffix used by the stemmer.
This constant take DEFAULT_MAX_SUFFIX_LENGTH by default.
@param new_max_suffix_length: the new max suffix length constant.
@type new_max_suffix_length: integer.
"""
self.max_suffix_length = new_max_suffix_length
def get_min_stem_length(self, ):
""" return the constant of min length of the stem used by the stemmer.
        This constant takes DEFAULT_MIN_STEM_LENGTH by default.
        @return: a number.
@rtype: integer.
"""
return self.min_stem_length
def set_min_stem_length(self, new_min_stem_length):
""" Set the constant of min length of the stem used by the stemmer.
This constant take DEFAULT_MIN_STEM_LENGTH by default.
@param new_min_stem_length: the min stem length constant.
@type new_min_stem_length: integer.
"""
self.min_stem_length = new_min_stem_length
def get_prefix_list(self, ):
""" return the prefixes list used by the stemmer.
This constant take DEFAULT_PREFIX_LIST by default.
@return: prefixes list.
@rtype: set().
"""
return self.prefix_list
def set_prefix_list(self, new_prefix_list):
""" Set prefixes list used by the stemmer.
This constant take DEFAULT_PREFIX_LIST by default.
@param new_prefix_list: a set of prefixes.
@type new_prefix_list: set of unicode string.
"""
self.prefix_list = new_prefix_list
self._create_prefix_tree(self.prefix_list)
def get_suffix_list(self, ):
""" return the suffixes list used by the stemmer.
This constant take DEFAULT_SUFFIX_LIST by default.
@return: suffixes list.
@rtype: set().
"""
return self.suffix_list
def set_suffix_list(self, new_suffix_list):
""" Set suffixes list used by the stemmer.
This constant take DEFAULT_SUFFIX_LIST by default.
@param new_suffix_list: a set of suffixes.
@type new_suffix_list: set of unicode string.
"""
self.suffix_list = new_suffix_list
self._create_suffix_tree(self.suffix_list)
def get_roots_list(self, ):
""" return the roots list used by the stemmer to validate roots.
This constant take roots_const.ROOTS by default.
@return: roots list.
@rtype: set().
"""
return self.roots_list
def set_roots_list(self, new_roots_list):
""" Set roots list used by the stemmer to validate roots..
This constant take roots_const.ROOTS by default.
@param new_roots_list: a set of roots.
@type new_roots_list: set of unicode string.
"""
self.roots_list = new_roots_list
def get_valid_affixes_list(self, ):
""" return the valid_affixes list used by the stemmer to validate affixes.
This constant take valid_affixes_const.ROOTS by default.
@return: valid_affixes list.
@rtype: set().
"""
return self.valid_affixes_list
def set_valid_affixes_list(self, new_valid_affixes_list):
""" Set valid_affixes list used by the stemmer to validate affixes..
This constant take valid_affixes_const.ROOTS by default.
@param new_valid_affixes_list: a set of valid_affixes.
@type new_valid_affixes_list: set of unicode string.
"""
self.valid_affixes_list = new_valid_affixes_list
def set_word(self, new_word):
""" Set the word to treat by the stemmer.
@param new_word: the new word.
@type new_word: unicode.
"""
self.word = new_word
def get_word(self):
""" return the last word treated by the stemmer.
@return: word.
@rtype: unicode.
"""
return self.word
#########################################################
    #{ Calculated Attribute Functions
#########################################################
def get_starword(self):
""" return the starlike word treated by the stemmer.
All non affix letters are converted to a joker.
The joker take by default DEFAULT_JOKER = "*".
        Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
@return: word.
@rtype: unicode.
"""
return self.starword
def get_root(self, prefix_index=-1, suffix_index=-1):
""" return the root of the treated word by the stemmer.
All non affix letters are converted to a joker.
All letters in the joker places are part of root.
The joker take by default DEFAULT_JOKER = "*".
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_root()
ضرب
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: root.
@rtype: unicode.
"""
# extract a root for a specific stem
if prefix_index >= 0 or suffix_index >= 0:
self.extract_root(prefix_index, suffix_index)
else:
self.root = self._choose_root()
return self.root
def _choose_root(self,):
""" choose a root for the given word """
if arabicstopwords.is_stop(self.word):
return arabicstopwords.stop_root(self.word)
if not self.segment_list:
self.segment(self.word)
affix_list = self.get_affix_list()
roots = [d['root'] for d in affix_list]
# filter by length
roots_tmp = roots
accepted = list(filter(self.is_root_length_valid, roots_tmp))
if accepted: # avoid empty list
roots_tmp = accepted
# filter by dictionary
accepted = list(filter(self.is_root, roots_tmp) )
if accepted: # avoid empty list
roots_tmp = accepted
# choose the most frequent root
accepted_root = self.most_common(roots_tmp)
return accepted_root
def _choose_stem(self,):
""" choose a stem for the given word """
# if word is stop word
if arabicstopwords.is_stop(self.word):
return arabicstopwords.stop_stem(self.word)
if not self.segment_list:
self.segment(self.word)
seg_list = self.segment_list
# verify affix against an affix list
seg_list = [(x,y) for (x,y) in seg_list if self._verify_affix(x,y)]
# choose the shortest stem
if not seg_list: # if empty
left = 0
right = len(self.word)
else:
left, right = self.get_left_right(seg_list)
return self.unvocalized[left:right]
def get_normalized(self):
""" return the normalized form of the treated word by the stemmer.
Some letters are converted into normal form like Hamzat.
Example:
>>> word = u"استؤجرُ"
>>> ArListem = ArabicLightStemmer()
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_normalized()
استءجر
@return: normalized word.
@rtype: unicode.
"""
return self.normalized
def get_unvocalized(self):
""" return the unvocalized form of the treated word by the stemmer.
Harakat are striped.
Example:
>>> word = u"الْعَرَبِيّةُ"
>>> ArListem = ArabicLightStemmer()
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_unvocalized()
العربية
@return: unvocalized word.
@rtype: unicode.
"""
return self.unvocalized
def get_left(self):
""" return the the left position of stemming
(prefixe end position )in the word treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_left()
3
@return: the left position of stemming.
@rtype: integer.
"""
return self.left
def get_right(self):
""" return the the right position of stemming
(suffixe start position )in the word treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_right()
6
@return: the right position of stemming.
@rtype: integer.
"""
return self.right
def get_stem(self, prefix_index=-1, suffix_index=-1):
""" return the stem of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_stem()
كاتب
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: stem.
@rtype: unicode.
"""
#~ # ask for default stem
#~ if prefix_index < 0 and suffix_index < 0:
#~ return self._choose_stem()
if prefix_index >= 0 or suffix_index >= 0:
if prefix_index < 0:
left = self.stem_left
#~ left = self.left
else:
left = prefix_index
if suffix_index < 0:
right = self.stem_right
#~ right = self.right
else:
right = suffix_index
return self.unvocalized[left:right]
else:
stem = self._choose_stem()
return stem
def _handle_teh_infix(self, starword, left, right):
"""
Handle case of Teh as infix.
The Teh can be Dal after Zain, and Tah after Dhad
"""
newstarstem = starword
# case of Teh marbuta
key_stem = newstarstem.replace(araby.TEH_MARBUTA,'')
if len(key_stem) != 4:
            # apply Teh and its variants only when the stem has 4 letters
newstarstem = re.sub(u"[%s%s%s]"%(araby.TEH, araby.TAH, araby.DAL), self.joker, newstarstem)
return newstarstem
        # substitute Teh in infixes; the Teh must be in the first
        # or second place, all others are converted
newstarstem = newstarstem[:2]+re.sub(araby.TEH, self.joker, newstarstem[2:])
# Tah طاء is infix if it's preceded by DHAD only
if self.word[left:right].startswith(u"ضط"):
newstarstem = newstarstem[:2]+re.sub(araby.TAH, self.joker, newstarstem[2:])
else:
newstarstem = re.sub(araby.TAH, self.joker, newstarstem)
# DAL دال is infix if it's preceded by زاي only
if self.word[left:right].startswith(u"زد"):
newstarstem = newstarstem[:2]+re.sub(araby.DAL, self.joker, newstarstem[2:])
else:
newstarstem = re.sub(araby.DAL, self.joker, newstarstem)
return newstarstem
def get_starstem(self, prefix_index=-1, suffix_index=-1):
""" return the star form stem of the treated word by the stemmer.
All non affix letters are converted to a joker.
The joker take by default DEFAULT_JOKER = "*".
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_stem()
كاتب
>>> print ArListem.get_starstem()
*ات*
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: stared form of stem.
@rtype: unicode.
"""
#~ starword = self.starword
starword = self.word
if prefix_index < 0 and suffix_index < 0:
return starword[self.left:self.right]
else:
left = self.left
right = self.right
if prefix_index >= 0:
left = prefix_index
if suffix_index >= 0:
right = suffix_index
if self.infix_letters != "":
newstarstem = re.sub(u"[^%s%s]"%(self.infix_letters, araby.TEH_MARBUTA), \
self.joker, starword[left:right])
                # substitute Teh in infixes; the Teh must be in the first
                # or second place, all others are converted
newstarstem = self._handle_teh_infix(newstarstem, left, right)
else:
newstarstem = self.joker*len(starword[left:right])
#~ print("star word", starword, newstarstem)
return newstarstem
def get_prefix(self, prefix_index=-1):
""" return the prefix of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_prefix()
أفت
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @return: prefix.
@rtype: unicode.
"""
if prefix_index < 0:
return self.unvocalized[:self.left]
else:
return self.unvocalized[:prefix_index]
def get_suffix(self, suffix_index=-1):
""" return the suffix of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_suffix()
انني
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
        @return: suffix.
@rtype: unicode.
"""
if suffix_index < 0:
return self.unvocalized[self.right:]
else:
return self.unvocalized[suffix_index:]
def get_affix(self, prefix_index=-1, suffix_index=-1):
""" return the affix of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_affix()
أفت-انني
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
        @return: affix (prefix and suffix joined by "-").
@rtype: unicode.
"""
return u"-".join([self.get_prefix(prefix_index), \
self.get_suffix(suffix_index)])
def get_affix_tuple(self, prefix_index=-1, suffix_index=0):
""" return the affix tuple of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتضاربانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_affix_tuple()
{'prefix': u'أفت', 'root': u'ضرب', 'suffix': u'انني', 'stem': u'ضارب'}
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: affix tuple.
@rtype: dict.
"""
return {
'prefix':self.get_prefix(prefix_index),
'suffix':self.get_suffix(suffix_index),
'stem':self.get_stem(prefix_index, suffix_index),
'starstem':self.get_starstem(prefix_index, suffix_index),
'root':self.get_root(prefix_index, suffix_index),
}
#########################################################
#{ Stemming Functions
#########################################################
def light_stem(self, word):
u"""
        Stemming function: stems an arabic word and returns a stem.
        This function stores the stemming positions
        (left, right) in the instance, so it is then possible to get other
        calculated attributes like: stem, prefix, suffix, root.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتضاربانني'
>>> stem = ArListem.light_stem(word)
>>> print ArListem.get_stem()
ضارب
>>> print ArListem.get_starstem()
*ا**
>>> print ArListem.get_left()
3
>>> print ArListem.get_right()
6
>>> print ArListem.get_root()
ضرب
@param word: the input word.
@type word: unicode.
@return: stem.
@rtype: unicode.
"""
if word == u'':
return u''
#~ starword, left, right = self.transform2stars(word)
self.transform2stars(word)
# segment
self.segment(word)
#constitute the root
#~ self.extract_root()
return self.get_stem()
def transform2stars(self, word):
"""
        Transform all non-affixation letters into a star.
        The star is a joker (by default '*'),
        which indicates that the corresponding letter is an original one.
        This function is used by the stemmer to identify original letters,
        and returns a starred form and stemming positions (left, right).
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتضاربانني'
        >>> starword, left, right = ArListem.transform2stars(word)
(أفت*ا**انني, 3, 6)
@param word: the input word.
@type word: unicode
@return: (starword, left, right):
- starword: all original letters converted into a star
- left: the greater possible left stemming position.
- right: the greater possible right stemming position.
@rtype: tuple.
"""
self.word = word
word = araby.strip_tashkeel(word)
# word, harakat = araby.separate(word)
self.unvocalized = word
word = re.sub(u"[%s]"%(araby.ALEF_MADDA), araby.HAMZA+araby.ALEF, word)
#~ word = re.sub(u"[^%s%s%s]"%(self.prefix_letters, self.suffix_letters, self.infix_letters), \
word = re.sub(u"[^%s%s]"%(self.prefix_letters, self.suffix_letters), \
self.joker, word)
#~ ln = len(word)
left = word.find(self.joker)
right = word.rfind(self.joker)
if left >= 0:
left = min(left, self.max_prefix_length-1)
right = max(right+1, len(word)-self.max_suffix_length)
prefix = word[:left]
        # stem: take the original word part and make all letters jokers except infixes
stem = self.word[left:right]
suffix = word[right:]
prefix = re.sub(u"[^%s]"%self.prefix_letters, self.joker, prefix)
# avoid null infixes
if self.infix_letters:
stem = re.sub(u"[^%s]"%self.infix_letters, self.joker, stem)
suffix = re.sub(u"[^%s]"%self.suffix_letters, self.joker, suffix)
word = prefix+stem+suffix
left = word.find(self.joker)
right = word.rfind(self.joker)
# prefix_list = self.PREFIX_LIST
# suffix_list = self.SUFFIX_LIST
if left < 0:
left = min(self.max_prefix_length, len(word)-2)
if left >= 0:
prefix = word[:left]
while prefix != "" and prefix not in self.prefix_list:
prefix = prefix[:-1]
if right < 0:
right = max(len(prefix), len(word)-self.max_suffix_length)
suffix = word[right:]
while suffix and suffix not in self.suffix_list:
suffix = suffix[1:]
left = len(prefix)
right = len(word)-len(suffix)
        # stem: take the original word part and make all letters jokers except infixes
        stem = self.word[left:right]
        # convert stem into stars.
        # a stem must start with alef, or end with alef.
        # any other infix letter isn't an infix at
        # the border of the stem.
        # substitute all non-infix letters
if self.infix_letters:
stem = re.sub(u"[^%s]"%self.infix_letters, self.joker, stem)
word = prefix+stem+suffix
# store result
self.stem_left = left
self.stem_right = right
self.starword = word
#~ self.extract_root()
# return starword, left, right position of stem
return (word, left, right)
def extract_root(self, prefix_index=-1, suffix_index=-1):
""" return the root of the treated word by the stemmer.
All non affix letters are converted to a joker.
All letters in the joker places are part of root.
The joker take by default DEFAULT_JOKER = "*".
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_root()
ضرب
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default
        word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: root.
@rtype: unicode.
"""
stem = self.get_stem(prefix_index, suffix_index)
root = u""
# if the stem has 3 letters it can be the root directly
if len(stem) == 3:
self.root = self._ajust_root(root, stem)
return self.root
starstem = self.get_starstem(prefix_index, suffix_index)
root = u""
if len(starstem) == len(stem):
for i, char in enumerate(stem):
if starstem[i] == self.joker:
root += char
else:
root = stem
# normalize root
root = self.normalize_root(root)
#controls on root letters and length
#~ if not self.is_root_length_valid(root):
#~ root = ""
if len(root) == 2:
root = self._ajust_root(root, starstem)
self.root = root
return root
def _ajust_root(self, root, starstem):
"""
        If the root has only two or three letters, we complete it with another letter
"""
if not starstem:
return root
if len(starstem) == 3:
starstem = starstem.replace(araby.ALEF, araby.WAW)
starstem = starstem.replace(araby.ALEF_MAKSURA, araby.YEH)
return starstem
        # The starstem can start with a joker (*) or an infix letter;
        # add a letter at the beginning
first = starstem[0]
last = starstem[-1:]
if first in (araby.ALEF, araby.WAW):
root = araby.WAW + root
elif first == araby.YEH:
root = araby.YEH + root
elif first == self.joker and last in (araby.ALEF, araby.WAW):
root += araby.WAW
elif first == self.joker and last in (araby.ALEF_MAKSURA, araby.YEH):
root += araby.WAW
elif first == self.joker and last == self.joker:
            # if length == 2, it is a doubled verb
if len(starstem) == 2:
root += root[-1]
else:
# I choose WAW because it's frequent
root = root[0]+ araby.WAW+ root[1]
return root
def _create_prefix_tree(self, prefixes):
"""
Create a prefixes tree from given prefixes list
@param prefixes: list of prefixes
@type prefixes: list of unicode
@return: prefixes tree
        @rtype: Tree structure
"""
prefixestree = {}
for prefix in prefixes:
# print prefix.encode('utf8')
branch = prefixestree
for char in prefix:
if char not in branch:
branch[char] = {}
branch = branch[char]
            # branch['#'] = '#' # the hash # as an end position
if '#' in branch:
branch['#'][prefix] = "#"
else:
branch['#'] = {prefix:"#", }
self.prefixes_tree = prefixestree
return self.prefixes_tree
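    # Hedged illustration, not in the original source: for a small prefix list
    # such as [u"ال", u"و"], the tree built above looks roughly like
    #     {u'ا': {u'ل': {'#': {u'ال': '#'}}}, u'و': {'#': {u'و': '#'}}}
    # where '#' marks an end-of-prefix position.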
def _create_suffix_tree(self, suffixes):
"""
Create a suffixes tree from given suffixes list
@param suffixes: list of suffixes
@type suffixes: list of unicode
@return: suffixes tree
        @rtype: Tree structure
"""
suffixestree = {}
for suffix in suffixes:
# print (u"'%s'"%suffix).encode('utf8')
branch = suffixestree
#reverse a string
for char in suffix[::-1]:
if char not in branch:
branch[char] = {}
branch = branch[char]
            # branch['#'] = '#' # the hash # as an end position
if "#" in branch:
branch['#'][suffix] = "#"
else:
branch['#'] = {suffix:"#", }
self.suffixes_tree = suffixestree
return self.suffixes_tree
def lookup_prefixes(self, word):
"""
lookup for prefixes in the word
@param word: the given word
@type word: unicode
        @return: list of prefix end positions
@rtype: list of int
"""
branch = self.prefixes_tree
lefts = [0, ]
i = 0
while i < len(word) and word[i] in branch:
if "#" in branch:
# if branch['#'].has_key(word[:i]):
lefts.append(i)
if word[i] in branch:
branch = branch[word[i]]
else:
# i += 1
break
i += 1
if i < len(word) and "#" in branch:
lefts.append(i)
return lefts
def lookup_suffixes(self, word):
"""
lookup for suffixes in the word
@param word: the given word
@type word: unicode
        @return: list of suffix start positions
@rtype: list of int
"""
branch = self.suffixes_tree
suffix = ''
# rights = [len(word)-1, ]
rights = []
i = len(word)-1
while i >= 0 and word[i] in branch:
suffix = word[i]+suffix
if '#' in branch:
# if branch['#'].has_key(word[i:]):
# rights.append(i)
rights.append(i+1)
if word[i] in branch:
branch = branch[word[i]]
else:
# i -= 1
break
i -= 1
if i >= 0 and "#" in branch:#and branch['#'].has_key(word[i+1:]):
rights.append(i+1)
return rights
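    # Hedged illustration, not in the original source: for a word such as
    # u'فتضربين', lookup_prefixes() yields candidate prefix end positions
    # (e.g. 0, 1, 2) and lookup_suffixes() candidate suffix start positions
    # (e.g. 5, 7); segment() below combines them into (left, right) pairs
    # like (1, 5), (2, 5) and (0, 7).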
#########################################################
#{ Segmentation Functions
#########################################################
def segment(self, word):
""" generate a list of all possible segmentation positions
        (left, right) of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'فتضربين'
>>> print ArListem.segment(word)
        set([(1, 5), (2, 5), (0, 7)])
@return: List of segmentation
@rtype: set of tuple of integer.
"""
self.word = word
self.unvocalized = araby.strip_tashkeel(word)
# word, harakat = araby.separate(word)
word = re.sub(u"[%s]"%(araby.ALEF_MADDA), araby.HAMZA+araby.ALEF, word)
# get all lefts position of prefixes
lefts = self.lookup_prefixes(word)
# get all rights position of suffixes
rights = self.lookup_suffixes(word)
if lefts:
self.left = max(lefts)
else:
self.left = -1
if rights:
self.right = min(rights)
else:
self.right = -1
#~ ln = len(word)
self.segment_list = set([(0, len(word))])
# print lefts, rights
for i in lefts:
for j in rights:
if j >= i+2 :
self.segment_list.add((i, j))
# filter segment according to valid affixes list
self.left, self.right = self.get_left_right(self.segment_list)
return self.segment_list
# #########################################################
# #{ Segmentation Functions
# #########################################################
def get_segment_list(self):
""" return a list of segmentation positions (left, right)
of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'فتضربين'
>>> ArListem.segment(word)
>>> print ArListem.get_segment_list()
        set([(1, 5), (2, 5), (0, 7)])
@return: List of segmentation
@rtype: set of tuple of integer.
"""
return self.segment_list
def get_affix_list(self, seg_list=[]):
u""" return a list of affix tuple of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'فتضربين'
>>> ArListem.segment(word)
>>> print ArListem.get_affix_list()
[{'prefix': u'ف', 'root': u'ضرب', 'suffix': u'\u064aن', 'stem': u'تضرب'},
{'prefix': u'فت', 'root': u'ضرب', 'suffix': u'\u064aن', 'stem': u'ضرب'},
{'prefix': u'', 'root': u'فضربن', 'suffix': u'', 'stem': u'فتضرب\u064aن'}]
@return: List of Affixes tuple
@rtype: list of dict.
"""
if not seg_list:
seg_list = self.segment_list
affix_list = []
for left,right in seg_list:
affix_list.append(self.get_affix_tuple(left, right))
return affix_list
def _valid_stem(self, stem, tag="noun", prefix=""):
""" Test if the stem is accepted"""
if not stem:
return False
# valid stems for verbs
if tag == "verb":
# verb has length <= 6
if len(stem) > 6 or len(stem) < 2:
return False
# forbidden letters in verbs like Teh Marbuta
elif araby.TEH_MARBUTA in stem:
return False
            # a 6-letter stem must start with ALEF
            elif len(stem) == 6 and not stem.startswith(araby.ALEF):
                return False
            # a 5-letter stem must start with ALEF/TEH or SEEN (a 6-letter verb stripped of its Alef)
            # a five-letter stem may itself be a five-letter verb,
            # so it must start with TEH or ALEF;
            # but if it is derived from a six-letter verb,
            # such as استغفر or افرنقع,
            # it must be preceded by a letter that drops the connecting ALEF (hamzat al-wasl)
elif len(stem) == 5 and not stem[0] in (araby.ALEF, araby.TEH):
if prefix[-1:] in (araby.YEH, araby.TEH, araby.NOON, araby.ALEF_HAMZA_ABOVE):
return False
            # an ALEF is not accepted after a present-tense (imperfect) prefix letter
elif stem.startswith(araby.ALEF) and prefix[-1:] in (araby.YEH, araby.NOON, araby.TEH, araby.ALEF_HAMZA_ABOVE, araby.ALEF):
return False
## lookup for stamp
if not verb_stamp_const.is_verb_stamp(stem):
return False
elif tag == "noun":
if len(stem) >= 8 :
return False
return True
return True
def _verify_affix(self, prefix_index=-1, suffix_index=-1):
"""
validate affixes against a list of valid affixes
"""
prefix = self.get_prefix(prefix_index)
suffix = self.get_suffix(suffix_index)
TAG = True
if TAG:
affix = prefix+'-'+suffix
stem = self.get_stem(prefix_index, suffix_index)
if affix in affix_const.VERB_AFFIX_LIST and self._valid_stem(stem,"verb", prefix):
# is a valid verb stem
if affix in affix_const.NOUN_AFFIX_LIST and self._valid_stem(stem,"noun"):
# is also a noun stem
return True # TAG VN
else:
return True # TAG V
else:
if affix in affix_const.NOUN_AFFIX_LIST and self._valid_stem(stem,"noun"):
return True # TAG N
else:
return False # not a valid verb or not a noun
return True
if self.valid_affixes_list :
affix = prefix+'-'+suffix
return affix in self.valid_affixes_list
else:
            # simplified checks
            # the definite article AL together with an attached pronoun
if ((u"ال" in prefix or u"لل" in prefix) and
(u'ه' in suffix or u'ك' in suffix)
):
return False
            # TEH MARBUTA together with imperfect (present-tense) prefix letters
if ((u"ي" in prefix or u"يس" in prefix or u"نس" in prefix
or u"تس" in prefix or u"سي" in prefix or u"سأ" in prefix) and
(u'ة' in suffix)
):
return False
            # the vowelled TEH (subject suffix) together with imperfect prefix letters
if ((u"ي" in prefix or u"يس" in prefix or u"نس" in prefix
or u"تس" in prefix or u"سي" in prefix or u"سأ" in prefix) and
(u'تم' in suffix or u'تن' in suffix )
):
return False
            # preposition letters together with the WAW of the sound masculine plural
            # or with the nominative dual
if ((u"ك" in prefix or u"ب" in prefix or u"لل" in prefix) and
(u'و' in suffix or u'ان' in suffix)
):
return False
return True
###############################################################
#{ General Functions
###############################################################
def normalize(self, word=u""):
"""
Normalize a word.
Convert some leters forms into unified form.
@param word: the input word, if word is empty,
the word member of the class is normalized.
@type word: unicode.
@return: normalized word.
@rtype: unicode.
"""
if word == u'' and self.word == u"":
return u""
elif word != u'':
self.word = word
else:
word = self.word
self.normalized = normalize.normalize_searchtext(word)
return self.normalized
def tokenize(self, text=u""):
"""
Tokenize text into words
@param text: the input text.
@type text: unicode.
@return: list of words.
@rtype: list.
"""
if not text:
return []
else:
mylist = self.token_pat.split(text)
if u'' in mylist:
mylist.remove(u'')
return mylist
@staticmethod
def normalize_root(word):
""" test if word is a root"""
# change alef madda to hamza + ALEF
word = word.replace(araby.ALEF_MADDA, araby.HAMZA+ araby.ALEF)
word = word.replace(araby.TEH_MARBUTA, '')
word = word.replace(araby.ALEF_MAKSURA, araby.YEH)
return araby.normalize_hamza(word)
@staticmethod
def is_root_length_valid(root):
return (len(root) >= 2 and len(root)<=4)
@staticmethod
def most_common(lst):
triroots = [x for x in lst if len(x) == 3]
if triroots:
lst = triroots
return max(set(lst), key=lst.count)
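    # Hedged illustration, not in the original source: triliteral candidates
    # are preferred, e.g. most_common([u'ضرب', u'ضب', u'ضرب']) returns u'ضرب'.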
def is_root(self, word):
""" test if word is a root"""
return word in self.root_list
@staticmethod
def get_left_right(ls):
"""
get the max left and the min right
"""
if not ls:
return -1,-1
l,_= max(ls)
r = min([y for (x,y) in ls if x==l])
return l, r
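    # Hedged illustration, not in the original source: the longest prefix wins,
    # then the shortest matching stem for it, e.g.
    #     get_left_right({(0, 7), (1, 5), (2, 5)})  ->  (2, 5)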
if __name__ == "__main__":
#~ from pyarabic.arabrepr import arepr as repr
ARLISTEM = ArabicLightStemmer()
wordlist =[u'أفتضاربانني',
u'بالمكتبة',
u'مزدهرة',
u'كاتب',
u'مضروب',
u'مضارب',
u"مردود",
u"مطلوب",
u"مشتت",
u'مزتهرة',
u'مضطرب',
u'بالمكتبة',
u'مالبدرسمه',
u"مكتوب",
u"الآجال",
u"بالبلدان",
u"وفيهما",
u"1245",
u"Taha",
u"@",
]
for word in wordlist:
# stemming word
ARLISTEM.light_stem(word)
# extract stem
print("stem", ARLISTEM.get_stem())
print(ARLISTEM.infix_letters)
# extract root
print("root:", ARLISTEM.get_root())
# get prefix position index
print("left",ARLISTEM.get_left())
print("left stem",ARLISTEM.stem_left)
# get prefix
print(ARLISTEM.get_prefix())
# get prefix with a specific index
print(ARLISTEM.get_prefix(2))
# get suffix position index
print("right",ARLISTEM.get_right())
print("right_stem",ARLISTEM.stem_right)
# get suffix
print("suffix", ARLISTEM.get_suffix())
# get suffix with a specific index
print(ARLISTEM.get_suffix(10))
# get affix tuple
print(ARLISTEM.get_affix_tuple())
# star words
print("starword", ARLISTEM.get_starword())
# get star stem
print("starstem",ARLISTEM.get_starstem())
# get normalized word
print("normalize", ARLISTEM.get_normalized())
# get unvocalized word
print("unvocalized",ARLISTEM.get_unvocalized())
# Detect all possible segmentation
print(ARLISTEM.segment(word))
print(ARLISTEM.get_segment_list())
# get affix list
print(repr(ARLISTEM.get_affix_list()))
|
gpl-3.0
| -4,222,205,376,867,603,500
| 34.792945
| 136
| 0.546386
| false
| 3.685274
| false
| false
| false
|
fpsw/Servo
|
servo/forms/devices.py
|
1
|
2204
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from servo.models import Tag, Device, Customer
from servo.forms import DatepickerInput, AutocompleteCharField
product_lines = [(k, x['name']) for k, x in Device.PRODUCT_LINES.items()]
class DeviceSearchForm(forms.Form):
product_line = forms.MultipleChoiceField(
choices=product_lines,
required=False
)
warranty_status = forms.MultipleChoiceField(
choices=Device.WARRANTY_CHOICES,
required=False,
)
date_start = forms.DateField(
required=False,
label=_('Created between'),
widget=DatepickerInput(attrs={'class': 'input-small'})
)
date_end = forms.DateField(
required=False,
label=mark_safe(' '),
widget=DatepickerInput(attrs={'class': 'input-small'})
)
sn = forms.CharField(required=False, label=_('Serial number contains'))
def __init__(self, *args, **kwargs):
super(DeviceSearchForm, self).__init__(*args, **kwargs)
self.fields['description'] = AutocompleteCharField('/api/device_models/',
max_length=128,
required=False,
label=_('Description contains')
)
class DeviceForm(forms.ModelForm):
"""The form for editing devices in the /devices view"""
"""
tags = forms.ModelMultipleChoiceField(
queryset=Tag.objects.filter(type='device'),
required=False
)
"""
class Meta:
model = Device
exclude = ('spec', 'customers', 'files', 'image_url',
'exploded_view_url', 'manual_url', )
widgets = {'purchased_on': DatepickerInput()}
class DeviceUploadForm(forms.Form):
datafile = forms.FileField(
help_text=_('Device data in Excel format (.xls or .xlsx)')
)
customer = forms.IntegerField(
required=False,
widget=forms.HiddenInput,
)
do_warranty_check = forms.BooleanField(
required=False,
initial=True,
help_text=_('Perform warranty check on uploaded serial numbers')
)
class DiagnosticsForm(forms.Form):
pass
|
bsd-2-clause
| -8,518,432,263,230,593,000
| 28.783784
| 81
| 0.632033
| false
| 4.066421
| false
| false
| false
|
kevinselwyn/pokestop
|
api.py
|
1
|
5080
|
#!/usr/bin/python
# coding=utf-8
"""Pokéstop API"""
import sys
import argparse
import json
from pokestop import Pokestop
from flask import Flask, jsonify
from flask_restful import Api, reqparse, Resource
#----------------------------------------------------------------#
# Constants
HOSTNAME = '0.0.0.0'
PORT = 5000
MIME = 'application/json'
#----------------------------------------------------------------#
# Utilities
def custom_error(status_code=404, message=''):
"""Returns custom JSON error"""
response = jsonify({
'status': status_code,
'message': message
})
response.status_code = status_code
response.content_type = MIME
return response
def get_args(variables=None):
"""Parses data or header arguments"""
parser = reqparse.RequestParser()
for variable, val in variables.items():
parser.add_argument(variable)
args = parser.parse_args()
output = {}
for key, val in args.items():
output[key] = val
for key, val in variables.items():
if not key in output or not output[key]:
output[key] = val
return output
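# Hedged illustration, not part of the original file: get_args() merges request
# values with the defaults passed in, so a request carrying only `latitude`
# keeps the caller's fallback for every other key, e.g.
#
#     get_args({'latitude': '', 'limit': 1000})
#     # -> {'latitude': '<value from the request>', 'limit': 1000}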
def make_response(output):
response = API.make_response(output, 200)
response.headers['X-Best-Team'] = 'Team Mystic'
return response
#----------------------------------------------------------------#
# App
APP = Flask(__name__)
API = Api(APP)
#----------------------------------------------------------------#
# Errors
@APP.errorhandler(404)
def page_not_found(error):
return custom_error(404, 'Invalid endpoint')
#----------------------------------------------------------------#
# Nearby
class NearbyEndpoint(Resource):
"""Nearby endpoint"""
routes = [
'/nearby',
'/nearby'
]
@classmethod
def get(cls):
"""Gets nearby"""
args = get_args({
'SACSID': '<SACSID cookie>',
'csrftoken': '<csrftoken cookie>',
'latitude': '',
'longitude': '',
'minimum': 0,
'maximum': 1000,
'order': 'ASC',
'limit': 1000
})
if not 'SACSID' in args or not args['SACSID'] or not 'csrftoken' in args or not args['csrftoken']:
return custom_error(401, 'Unauthorized request')
if not 'latitude' in args or not args['latitude'] or not 'longitude' in args or not args['longitude']:
return custom_error(404, 'Missing latitude and longitude')
pokestop = Pokestop(args)
output = pokestop.entities()
response = make_response(json.loads(output))
return response
@classmethod
def post(cls):
"""Gets nearby by post"""
return cls.get()
API.add_resource(NearbyEndpoint, *NearbyEndpoint.routes)
#----------------------------------------------------------------#
# Pokéstop
class PokestopEndpoint(Resource):
"""Pokéstop endpoint"""
routes = [
'/pokestop',
'/pokestop'
]
@classmethod
def get(cls):
"""Gets Pokéstop"""
args = get_args({
'SACSID': '<SACSID cookie>',
'csrftoken': '<csrftoken cookie>',
'guid': '',
'latitude': '',
'longitude': '',
'minimum': 0,
'maximum': 1000,
'order': 'ASC',
'limit': 1000
})
if not 'SACSID' in args or not args['SACSID'] or not 'csrftoken' in args or not args['csrftoken']:
return custom_error(401, 'Unauthorized request')
if not 'guid' in args or not args['guid']:
return custom_error(404, 'Missing Pokéstop GUID')
pokestop = Pokestop(args)
output = pokestop.entity(args['guid'])
response = make_response(json.loads(output))
return response
@classmethod
def post(cls):
"""Gets Pokéstop by post"""
return cls.get()
API.add_resource(PokestopEndpoint, *PokestopEndpoint.routes)
#----------------------------------------------------------------#
# Main
def main(argc=0, argv=None):
"""Main function"""
parser = argparse.ArgumentParser()
flags = [
{'short': '-n', 'long': '--host'},
{'short': '-p', 'long': '--port'},
{'short': '-d', 'long': '--debug'}
]
arguments = [
{
'help': 'Host',
'required': False,
'action': 'store',
'default': HOSTNAME
},
{
'help': 'Port',
'required': False,
'action': 'store',
'default': PORT
},
{
'help': 'Debugging',
'required': False,
'action': 'store_true',
'default': False
}
]
for i in range(0, len(flags)):
parser.add_argument(flags[i]['short'], flags[i]['long'], **arguments[i])
args = parser.parse_args(argv[1:argc])
APP.run(host=args.host, port=args.port, debug=args.debug)
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
|
gpl-3.0
| 4,443,108,776,689,280,500
| 23.394231
| 110
| 0.493102
| false
| 4.085346
| false
| false
| false
|
Jamonek/Robinhood
|
docs/conf.py
|
1
|
2026
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = "pyrh"
copyright = "2020, Unofficial Robinhood Python API"
author = "Unofficial Robinhood Python API"
master_doc = "index"
exclude_patterns = ["stubs/*"] # ignore stubs from checks
# The full version, including alpha/beta/rc tags
release = "2.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"autodocsumm",
"sphinx_autodoc_typehints",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# source_suffix = '.rst'
source_suffix = [".rst"]
# intersphinx
intersphinx_mapping = {
"requests": ("https://requests.readthedocs.io/en/master/", None),
}
# Autodoc
autodoc_default_flags = ["members"]
autosummary_generate = True
|
mit
| 3,453,407,910,459,892,000
| 31.15873
| 78
| 0.671273
| false
| 3.881226
| true
| false
| false
|
nke001/attention-lvcsr
|
libs/Theano/theano/sandbox/cuda/cula.py
|
1
|
3988
|
import pkg_resources
import theano
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
try:
from theano.sandbox.cuda import cuda_ndarray
dimshuffle = cuda_ndarray.cuda_ndarray.dimshuffle
except ImportError:
pass
cula_available = False
try:
from scikits.cuda import cula
cula_available = True
except (ImportError, OSError, pkg_resources.DistributionNotFound):
pass
cula_initialized = False
class GpuSolve(GpuOp):
"""
CULA GPU solver OP.
:param trans: Whether to take the transpose of the input matrix
or not.
"""
__props__ = ('trans',)
def __init__(self, trans='N'):
self.trans = trans
super(GpuSolve, self).__init__()
def output_type(self, inp):
return CudaNdarrayType(broadcastable=[False] * inp.type.ndim)
def make_node(self, inp1, inp2):
inp1 = as_cuda_ndarray_variable(inp1)
inp2 = as_cuda_ndarray_variable(inp2)
assert inp1.ndim == 2
assert inp2.ndim == 2
return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
def make_thunk(self,
node,
storage_map, _,
no_recycling=[]):
# Initialize CULA the first time it is needed
global cula_initialized
if not cula_available:
raise RuntimeError('Cula is not available and '
'GpuSolve Op can not be constructed.')
if not cula_initialized:
cula.culaInitialize()
cula_initialized = True
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
def thunk():
# size of the matrices to invert
z = outputs[0]
# Matrix
A = inputs[0][0]
# Solution vectors
b = inputs[1][0]
# A is not explicitly converted between C and F order, instead we
# switch the "transpose" flag
if self.trans in ('T', 'C'):
trans = 'N'
else:
trans = 'T'
# Convert b to F-order from c-order.
b_cpy = dimshuffle(b, (1, 0)).reshape((b.shape[0], b.shape[1]))
# This copy forces allocation of a new C-contiguous buffer
# and returns it.
A_cpy = A.copy()
b_cpy = b_cpy.copy()
def cula_gpu_solve(A_, b_, trans='T'):
A_shape = A_.shape
b_shape = b_.shape
assert(len(A_shape) == 2)
assert(len(b_shape) == 2)
if trans in ['T', 'C']:
l, n = A_shape
k, m = b_shape
if n != k:
raise ValueError('A and b must be aligned.')
elif trans in ['N']:
n, l = A_shape
k, m = b_shape
if l != m:
raise ValueError('A and b must be aligned.')
else:
raise ValueError('Invalid value for trans')
lda = max(1, n)
ldb = max(1, n, l)
# construct pointer arrays needed for culaDeviceSgels
# Cula requires you to pass a pointer for A and b.
A_ptr = A_.gpudata
b_ptr = b_.gpudata
cula.culaDeviceSgels(trans, n, l, m, A_ptr, lda, b_ptr, ldb)
return A_, b_
A_pycuda, b_pycuda = cula_gpu_solve(A_cpy, b_cpy, trans)
# Convert b to F-order from c-order and assign it to output:
b_cpy = b_cpy.reshape(b.shape[::-1])
b_cpy = dimshuffle(b_cpy, (1, 0))
z[0] = b_cpy
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
gpu_solve = GpuSolve()
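# Hedged usage sketch, not part of the original file (assumes a working
# CUDA/scikits.cuda setup):
#
#     import theano.tensor as T
#     A = T.fmatrix('A')
#     b = T.fmatrix('b')
#     x = gpu_solve(A, b)            # symbolic solve node backed by culaDeviceSgels
#     f = theano.function([A, b], x)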
|
mit
| 737,977,525,252,001,200
| 27.898551
| 77
| 0.515547
| false
| 3.890732
| false
| false
| false
|
tkln/HelvarNet
|
http_gateway.py
|
1
|
1266
|
#!/usr/bin/python3
import http.server
import socketserver
import helvar
helvarNet = helvar.HelvarNet('10.254.1.2', 50000)
leds = [helvar.LedUnit(helvarNet, '1.2.1.1'),
helvar.LedUnit(helvarNet, '1.2.1.2'),
helvar.LedUnit(helvarNet, '1.2.1.3'),
helvar.LedUnit(helvarNet, '1.2.1.4'),
helvar.LedUnit(helvarNet, '1.2.1.5')]
class Handler(http.server.BaseHTTPRequestHandler):
def __parse_url(self):
parts = self.path.split('/')
print(self.path)
return {'base' : parts[1],
'id' : int(parts[2]),
'level' : int(parts[3]),
'fade_time' : int(parts[4])}
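    # Hedged illustration, not in the original file: a request path such as
    # /lamp/2/80/500 is parsed into
    #     {'base': 'lamp', 'id': 2, 'level': 80, 'fade_time': 500}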
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
req = self.__parse_url()
if (req['base'] == 'lamp'):
leds[req['id']].set(req['level'], req['fade_time'])
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
#self.wfile.close()
PORT = 8002
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
|
mit
| 3,007,065,090,095,122,400
| 28.44186
| 63
| 0.578989
| false
| 2.992908
| false
| false
| false
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM1_then1_ConnectedLHS.py
|
1
|
2650
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM1_then1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM1_then1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM1_then1_ConnectedLHS, self).__init__(name='HMM1_then1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM1_then1')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
mit
| 8,863,518,393,962,690,000
| 42.442623
| 125
| 0.47434
| false
| 5.206287
| false
| false
| false
|
digris/openbroadcast.org
|
website/apps/alibrary/models/artistmodels.py
|
1
|
13676
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
import uuid
import arating
import tagging
from alibrary.models import MigrationMixin, Relation, Profession
from alibrary.util.slug import unique_slugify
from alibrary.util.storage import get_dir_for_object, OverwriteStorage
from base.cacheops_extra import cached_uuid_aware
from base.mixins import TimestampedModelMixin
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.db.models import Q
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.encoding import python_2_unicode_compatible
from django.utils import translation
from django_date_extensions.fields import ApproximateDateField
from django_extensions.db.fields import AutoSlugField
from l10n.models import Country
from tagging.registry import register as tagging_register
from .mediamodels import MediaArtists, MediaExtraartists, Media
from .releasemodels import Release
log = logging.getLogger(__name__)
LOOKUP_PROVIDERS = (("discogs", _("Discogs")), ("musicbrainz", _("Musicbrainz")))
def upload_image_to(instance, filename):
filename, extension = os.path.splitext(filename)
return os.path.join(get_dir_for_object(instance), "image%s" % extension.lower())
@python_2_unicode_compatible
class NameVariation(models.Model):
name = models.CharField(max_length=250, db_index=True)
artist = models.ForeignKey(
"Artist",
related_name="namevariations",
on_delete=models.CASCADE,
null=True,
blank=True,
)
class Meta:
app_label = "alibrary"
verbose_name = _("Name variation")
verbose_name_plural = _("Name variation")
ordering = ("name",)
def __str__(self):
return self.name
class ArtistManager(models.Manager):
def listed(self):
return self.get_queryset().filter(listed=True, priority__gt=0)
@python_2_unicode_compatible
class Artist(MigrationMixin, TimestampedModelMixin, models.Model):
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
name = models.CharField(max_length=250, db_index=True)
slug = AutoSlugField(
populate_from="name", editable=True, blank=True, overwrite=True, db_index=True
)
TYPE_CHOICES = (
("person", _("Person")),
("group", _("Group")),
("orchestra", _("Orchestra")),
("other", _("Other")),
)
type = models.CharField(
verbose_name="Artist type",
max_length=128,
blank=True,
null=True,
choices=TYPE_CHOICES,
)
main_image = models.ImageField(
verbose_name=_("Image"),
upload_to=upload_image_to,
storage=OverwriteStorage(),
null=True,
blank=True,
)
real_name = models.CharField(max_length=250, blank=True, null=True)
disambiguation = models.CharField(max_length=256, blank=True, null=True)
country = models.ForeignKey(Country, blank=True, null=True)
booking_contact = models.CharField(
verbose_name=_("Booking"), max_length=256, blank=True, null=True
)
email = models.EmailField(
verbose_name=_("E-Mail"), max_length=256, blank=True, null=True
)
date_start = ApproximateDateField(
verbose_name=_("Begin"),
blank=True,
null=True,
help_text=_("date of formation / date of birth"),
)
date_end = ApproximateDateField(
verbose_name=_("End"),
blank=True,
null=True,
help_text=_("date of breakup / date of death"),
)
# properties to create 'special' objects. (like 'Unknown')
listed = models.BooleanField(
verbose_name="Include in listings",
default=True,
help_text=_("Should this Artist be shown on the default Artist-list?"),
)
disable_link = models.BooleanField(
verbose_name="Disable Link",
default=False,
help_text=_('Disable Linking. Useful e.g. for "Varius Artists"'),
)
disable_editing = models.BooleanField(
verbose_name="Disable Editing",
default=False,
help_text=_('Disable Editing. Useful e.g. for "Unknown Artist"'),
)
excerpt = models.TextField(blank=True, null=True)
biography = models.TextField(blank=True, null=True)
members = models.ManyToManyField(
"self",
through="ArtistMembership",
symmetrical=False,
)
aliases = models.ManyToManyField(
"self",
through="ArtistAlias",
related_name="artist_aliases",
blank=True,
symmetrical=False,
)
# relations a.k.a. links
relations = GenericRelation(Relation)
# tagging (d_tags = "display tags")
d_tags = tagging.fields.TagField(
max_length=1024,
verbose_name="Tags",
blank=True,
null=True,
)
professions = models.ManyToManyField(
Profession,
through="ArtistProfessions",
)
# user relations
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_owner",
on_delete=models.SET_NULL,
)
creator = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_creator",
on_delete=models.SET_NULL,
)
last_editor = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_last_editor",
on_delete=models.SET_NULL,
)
publisher = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_publisher",
on_delete=models.SET_NULL,
)
# identifiers
ipi_code = models.CharField(
verbose_name=_("IPI Code"), max_length=32, blank=True, null=True
)
isni_code = models.CharField(
verbose_name=_("ISNI Code"), max_length=32, blank=True, null=True
)
objects = ArtistManager()
class Meta:
app_label = "alibrary"
verbose_name = _("Artist")
verbose_name_plural = _("Artists")
ordering = ("name",)
def __str__(self):
return self.name
@property
def classname(self):
return self.__class__.__name__
def get_ct(self):
return "{}.{}".format(self._meta.app_label, self.__class__.__name__).lower()
def get_absolute_url(self):
if self.disable_link:
return None
return reverse("alibrary-artist-detail", kwargs={"uuid": str(self.uuid)})
def get_edit_url(self):
return reverse("alibrary-artist-edit", args=(self.pk,))
def get_admin_url(self):
return reverse("admin:alibrary_artist_change", args=(self.pk,))
def get_api_url(self):
return (
reverse(
"api_dispatch_detail",
kwargs={
"api_name": "v1",
"resource_name": "library/artist",
"pk": self.pk,
},
)
+ ""
)
@property
def description(self):
"""mapping to generic field"""
return self.biography
@cached_property
def get_membership(self):
""" get artists group/band membership """
return [m.parent for m in ArtistMembership.objects.filter(child=self)]
def get_alias_ids(self, exclude=None):
""" get ids of artists aliases """
exclude = exclude or []
alias_ids = []
parent_alias_ids = (
ArtistAlias.objects.filter(child__pk=self.pk)
.values_list("parent__pk", flat=True)
.distinct()
)
child_alias_ids = (
ArtistAlias.objects.filter(parent__pk=self.pk)
.values_list("child__pk", flat=True)
.distinct()
)
alias_ids.extend(parent_alias_ids)
alias_ids.extend(child_alias_ids)
for alias_id in alias_ids:
            if alias_id != self.pk and alias_id not in exclude:
exclude.append(alias_id)
alias_ids.extend(
Artist.objects.get(pk=alias_id).get_alias_ids(exclude=exclude)
)
return alias_ids
def get_aliases(self):
""" get artists aliases """
return (
Artist.objects.filter(pk__in=self.get_alias_ids([]))
.exclude(pk=self.pk)
.distinct()
)
###################################################################
# TODO: look for a better (=faster) way to get appearances!
###################################################################
@cached_uuid_aware(timeout=60 * 60 * 24)
def get_releases(self):
""" get releases where artist appears """
media_ids = []
qs_a = Media.objects.filter(artist=self)
qs_mediaartist = MediaArtists.objects.filter(artist=self)
media_ids += qs_a.values_list("id", flat=True)
media_ids += qs_mediaartist.values_list("media_id", flat=True)
return Release.objects.filter(
Q(media_release__pk__in=media_ids) | Q(album_artists__pk=self.pk)
).distinct()
@cached_uuid_aware(timeout=60 * 60 * 24)
def get_media(self):
""" get tracks where artist appears """
media_ids = []
qs_a = Media.objects.filter(artist=self)
qs_mediaartist = MediaArtists.objects.filter(artist=self)
qs_credited = MediaExtraartists.objects.filter(artist=self)
media_ids += qs_a.values_list("id", flat=True)
media_ids += qs_mediaartist.values_list("media_id", flat=True)
media_ids += qs_credited.values_list("media_id", flat=True)
return Media.objects.filter(pk__in=list(set(media_ids)))
def appearances(self):
""" get artists appearances (releases/tracks) """
try:
num_releases = self.get_releases().count()
except:
num_releases = 0
try:
num_media = self.get_media().count()
except:
num_media = 0
appearances = {"num_releases": num_releases, "num_media": num_media}
return appearances
def get_lookup_providers(self):
providers = []
for key, name in LOOKUP_PROVIDERS:
relations = self.relations.filter(service=key)
relation = None
if relations.exists():
relation = relations[0]
providers.append({"key": key, "name": name, "relation": relation})
return providers
def save(self, *args, **kwargs):
unique_slugify(self, self.name)
if self.type:
self.type = self.type.lower()
"""
TODO: implement otherwise
there is a special-case artist called "Various Artists" that should only exist once.
        if - for whatever unplanned reason - a duplicate comes in anyway, we
        add a counter to the name to ensure uniqueness.
"""
if self.name == "Various Artists" and self.pk is None:
log.warning('attempt to create "Various Artists"')
original_name = self.name
i = 1
while Artist.objects.filter(name=self.name).count() > 0:
self.name = "%s %s" % (original_name, i)
i += 1
super(Artist, self).save(*args, **kwargs)
tagging_register(Artist)
arating.enable_voting_on(Artist)
# @receiver(post_save, sender=Artist)
# def action_handler(sender, instance, created, **kwargs):
# try:
# action_handler_task.delay(instance, created)
# except:
# pass
#
# @task
# def action_handler_task(instance, created):
# if created and instance.creator:
# action.send(instance.creator, verb=_('created'), target=instance)
#
# elif instance.last_editor:
# action.send(instance.last_editor, verb=_('updated'), target=instance)
@python_2_unicode_compatible
class ArtistMembership(models.Model):
parent = models.ForeignKey(
Artist, related_name="artist_parent", blank=True, null=True
)
child = models.ForeignKey(
Artist, related_name="artist_child", blank=True, null=True
)
profession = models.ForeignKey(
Profession, related_name="artist_membership_profession", blank=True, null=True
)
class Meta:
app_label = "alibrary"
verbose_name = _("Membersip")
verbose_name_plural = _("Membersips")
def __str__(self):
return '"%s" <> "%s"' % (self.parent.name, self.child.name)
def save(self, *args, **kwargs):
if not self.child or not self.parent:
self.delete()
super(ArtistMembership, self).save(*args, **kwargs)
@python_2_unicode_compatible
class ArtistAlias(models.Model):
parent = models.ForeignKey(Artist, related_name="alias_parent")
child = models.ForeignKey(Artist, related_name="alias_child")
class Meta:
app_label = "alibrary"
verbose_name = _("Alias")
verbose_name_plural = _("Aliases")
def __str__(self):
return '"%s" <> "%s"' % (self.parent.name, self.child.name)
@python_2_unicode_compatible
class ArtistProfessions(models.Model):
artist = models.ForeignKey("Artist")
profession = models.ForeignKey("Profession")
class Meta:
app_label = "alibrary"
verbose_name = _("Profession")
verbose_name_plural = _("Professions")
def __str__(self):
return '"%s" : "%s"' % (self.artist.name, self.profession.name)
|
gpl-3.0
| -6,412,760,050,981,160,000
| 29.663677
| 92
| 0.602369
| false
| 3.862186
| false
| false
| false
|
heLomaN/NetCrawler
|
nga_hot.py
|
1
|
1579
|
#!/usr/bin/env python
# coding=utf-8
import requests as rq
import random as rd
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
headers = {
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36',
}
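# First pass: request page 1 of board fid=-7 without sending the header dict
# above (the call that would send it is commented out below), mainly to obtain
# the session cookies needed for the follow-up request.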
page_idx = 1
query_dict = {'fid':'-7', 'page':str(page_idx)}
url_head = 'http://bbs.ngacn.cc/thread.php'
r = rq.get(url_head, params = query_dict)
#r = rq.get(url_head, params = query_dict, headers = headers)
print 'First init OK.'
r.encoding = 'gbk'
f = open('test_first.html', 'w')
f.write(r.text)
f.close()
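# Second pass: repeat the request with full browser-like headers, the cookies
# returned by the first response and a random `rand` query parameter, then
# dump the returned HTML to test.html for inspection.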
headers = {
'Host': 'bbs.ngacn.cc',
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36',
'Referer': 'http://bbs.ngacn.cc/thread.php?fid=-7&page=1',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8'
}
rand = rd.randint(1, 999)
query_dict = {'fid':-7, 'page':page_idx, 'rand': rand}
#query_dict = {'fid':-7, 'page':page_idx, 'lite':'xml', 'rand': rand}
cookies = r.cookies
print cookies
#headers = r.headers
print headers
r = rq.get(url_head, params = query_dict, headers = headers, cookies = cookies)
print r.url
r.encoding = 'gbk'
#print r.text
f = open('test.html', 'w')
f.write(r.text)
f.close()
#print r.json()
|
apache-2.0
| -929,872,826,803,928,700
| 25.762712
| 126
| 0.670044
| false
| 2.451863
| false
| true
| false
|
lynchnf/maneki-neko-web
|
socialmedia/tests.py
|
1
|
1601
|
from django.test import TestCase
from cms.api import add_plugin
from cms.models import Placeholder
from socialmedia.cms_plugins import SocialLinkPlugin
from socialmedia.models import ICON_CHOICES
class SocialLinkPluginTest(TestCase):
def test_plugin_context(self):
placeholder = Placeholder.objects.create(slot='test')
model_instance = add_plugin(
placeholder,
SocialLinkPlugin,
'en',
icon = ICON_CHOICES[17][0],
size = 1,
url = "http://mimi-the-maneki-neko.tumblr.com/"
)
plugin_instance = model_instance.get_plugin_class_instance()
context = plugin_instance.render({}, model_instance, None)
model = context['instance']
self.assertEqual(model.url, "http://mimi-the-maneki-neko.tumblr.com/")
self.assertIn('title', context)
self.assertEqual(context['title'], 'Tumblr')
self.assertIn('styleClass', context)
self.assertEqual(context['styleClass'], 'fa fa-tumblr-square fa-lg')
def test_plugin_html(self):
placeholder = Placeholder.objects.create(slot='test')
model_instance = add_plugin(
placeholder,
SocialLinkPlugin,
'en',
icon = ICON_CHOICES[17][0],
size = 1,
url = "http://mimi-the-maneki-neko.tumblr.com/"
)
html = model_instance.render_plugin({})
self.assertEqual(html, '<a href="http://mimi-the-maneki-neko.tumblr.com/" title="Tumblr" target="_blank"><i class="fa fa-tumblr-square fa-lg"></i></a>')
|
mit
| -7,077,663,203,663,897,000
| 38.073171
| 160
| 0.613991
| false
| 3.589686
| true
| false
| false
|
smartdong/PythonPractise
|
Chapter 04/BombCatcher.py
|
1
|
1793
|
import sys, random, time, pygame
from pygame.locals import *
def print_text(font, x, y, text, color=(255,255,255)):
imgText = font.render(text, True, color)
screen.blit(imgText, (x,y))
pygame.init()
screen = pygame.display.set_mode((600,500))
pygame.display.set_caption("Bomb Catching Game")
font1 = pygame.font.Font(None, 24)
pygame.mouse.set_visible(False)
white = 255,255,255
red = 220, 50, 50
yellow = 230,230,50
black = 0,0,0
lives = 3
score = 0
clock_start = 0
game_over = True
mouse_x = mouse_y = 0
pos_x = 300
pos_y = 460
bomb_x = random.randint(0,500)
bomb_y = -50
vel_y = 7
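# Main loop: the bomb falls by vel_y pixels per frame; letting it reach the
# bottom costs a life and respawns it at a random x, catching it with the
# paddle adds 10 to the score, and losing all lives returns to the
# "<CLICK TO PLAY>" screen.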
while True:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif event.type == MOUSEMOTION:
mouse_x,mouse_y = event.pos
move_x,move_y = event.rel
elif event.type == MOUSEBUTTONUP:
if game_over:
game_over = False
lives = 3
score = 0
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
sys.exit()
screen.fill((0,0,100))
if game_over:
print_text(font1, 100, 200, "<CLICK TO PLAY>")
else:
bomb_y += vel_y
if bomb_y > 500:
bomb_x = random.randint(0, 500)
bomb_y = -50
lives -= 1
if lives == 0:
game_over = True
elif bomb_y > pos_y:
if bomb_x > pos_x and bomb_x < pos_x + 120:
score += 10
bomb_x = random.randint(0, 500)
bomb_y = -50
pygame.draw.circle(screen, black, (bomb_x-4,int(bomb_y)-4), 30, 0)
pygame.draw.circle(screen, yellow, (bomb_x,int(bomb_y)), 30, 0)
pos_x = mouse_x
if pos_x < 0:
pos_x = 0
elif pos_x > 500:
pos_x = 500
pygame.draw.rect(screen, black, (pos_x-4,pos_y-4,120,40), 0)
pygame.draw.rect(screen, red, (pos_x,pos_y,120,40), 0)
print_text(font1, 0, 0, "LIVES: " + str(lives))
print_text(font1, 500, 0, "SCORE: " + str(score))
pygame.display.update()
|
mit
| -304,384,762,807,505,800
| 20.105882
| 68
| 0.625767
| false
| 2.362319
| false
| false
| false
|
PHLF/rasa_nlu
|
_pytest/test_sanity.py
|
1
|
2984
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import importlib
import pkgutil
from collections import defaultdict
import pytest
from multiprocessing import Queue, Process
from six import PY2
def import_submodules(package_name, skip_list):
""" Import all submodules of a module, recursively, including subpackages.
`skip_list` denotes packages that should be skipped during the import"""
package = importlib.import_module(package_name)
results = []
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
if full_name not in skip_list:
imported_module = importlib.import_module(full_name)
if PY2:
reload(imported_module)
else:
importlib.reload(imported_module)
results.append(full_name)
if is_pkg:
results += import_submodules(full_name, skip_list)
return results
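# Usage sketch for import_submodules(): it returns the fully qualified names of
# every module it imported. The skipped package below is only an illustration,
# not something the test suite actually excludes:
#
#   imported = import_submodules("rasa_nlu", skip_list={"rasa_nlu.server"})
#   print(len(imported), "modules imported")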
@pytest.mark.parametrize("banned_package", ["spacy", "mitie", "sklearn", "duckling"])
def test_no_global_imports_of_banned_package(banned_package):
"""This test ensures that neither of the banned packages are imported module wise in any of our code files.
If one of the dependencies is needed, they should be imported within a function."""
q = Queue()
p = Process(target=get_tracked_imports, args=(q,))
p.start()
tracked_imports = q.get()
p.join()
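    # The tracking runs in a child process so that patching __import__ and
    # re-importing every rasa_nlu module cannot leak state into this process.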
def find_modules_importing(name):
return {v for k, vs in tracked_imports.items() if k.startswith(name) for v in vs}
assert not find_modules_importing(banned_package), \
"No module should import {} globally. Found in {}".format(
banned_package, ", ".join(find_modules_importing(banned_package)))
def get_tracked_imports(q):
import inspect
    # To track imports across modules, we will replace the default import function
try:
# noinspection PyCompatibility
import __builtin__
original_import_function = __builtin__.__import__
except ImportError:
# noinspection PyCompatibility
import builtins
original_import_function = builtins.__import__
tracked_imports = defaultdict(list)
def import_tracking(name, *x, **xs):
caller = inspect.currentframe().f_back
caller_name = caller.f_globals.get('__name__')
tracked_imports[name].append(caller_name)
return original_import_function(name, *x, **xs)
if PY2:
__builtin__.__import__ = import_tracking
else:
builtins.__import__ = import_tracking
# import all available modules and track imports on the way
import_submodules("rasa_nlu", skip_list={})
if PY2:
__builtin__.__import__ = original_import_function
else:
builtins.__import__ = original_import_function
q.put(tracked_imports)
|
apache-2.0
| -1,151,380,040,605,416,700
| 32.155556
| 111
| 0.656836
| false
| 4.132964
| false
| false
| false
|
eduardoklosowski/ergo-notes
|
ergonotes/migrations/0001_initial.py
|
1
|
1776
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('priority', models.SmallIntegerField(choices=[(1, 'Alta'), (0, 'Normal'), (-1, 'Baixa')], default=0, verbose_name='prioridade')),
('title', models.CharField(max_length=32, verbose_name='título')),
('show_on_home', models.BooleanField(default=False, verbose_name='mostrar no home')),
('create_on', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modify_on', models.DateTimeField(auto_now=True, verbose_name='atualizado em')),
('markup', models.CharField(choices=[('txt', 'Texto'), ('html', 'HTML'), ('rst', 'reStructuredText'), ('mk', 'Markdown'), ('textile', 'Textile')], default='txt', verbose_name='markup', max_length=8)),
('text', models.TextField(verbose_name='texto', blank=True)),
('user', models.ForeignKey(verbose_name='usuário', related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'notas',
'verbose_name': 'nota',
'ordering': ('user', '-priority', 'title'),
},
),
migrations.AlterUniqueTogether(
name='note',
unique_together=set([('user', 'title')]),
),
]
|
agpl-3.0
| -3,621,076,773,664,285,700
| 45.684211
| 216
| 0.56708
| false
| 4.031818
| false
| false
| false
|
asimshankar/tensorflow
|
tensorflow/python/keras/integration_test.py
|
1
|
13458
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.layers import core as tf_core_layers
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.platform import test
class KerasIntegrationTest(test.TestCase):
def test_version(self):
self.assertTrue(keras.__version__.endswith('-tf'))
@test_util.run_v1_only('b/120545219')
def test_vector_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential([
keras.layers.Dense(16,
activation='relu',
input_shape=x_train.shape[1:]),
keras.layers.Dropout(0.1),
keras.layers.Dense(y_train.shape[-1], activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
def test_vector_classification_functional(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(20,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.layers.Input(shape=x_train.shape[1:])
x = keras.layers.Dense(16, activation='relu')(inputs)
x = keras.layers.Dropout(0.1)(x)
outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(x)
model = keras.models.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
def test_temporal_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.LSTM(5, return_sequences=True,
input_shape=x_train.shape[1:]))
model.add(keras.layers.GRU(y_train.shape[-1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=15, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
def test_temporal_classification_sequential_tf_rnn(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
input_shape=x_train.shape[1:]))
model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1],
activation='softmax',
dtype=dtypes.float32)))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=15, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
def test_image_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(12, 12, 3),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(
4, 3,
padding='same',
activation='relu',
input_shape=x_train.shape[1:]))
model.add(keras.layers.Conv2D(
8, 3,
padding='same',
activation='relu'))
model.add(keras.layers.Conv2D(
16, 3,
padding='same',
activation='relu'))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_video_classification_functional(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 8, 8, 3),
num_classes=3)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.layers.Input(shape=x_train.shape[1:])
x = keras.layers.TimeDistributed(
keras.layers.Conv2D(4, 3, activation='relu'))(inputs)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.TimeDistributed(keras.layers.GlobalMaxPooling2D())(x)
x = keras.layers.Conv1D(8, 3, activation='relu')(x)
x = keras.layers.Flatten()(x)
outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(x)
model = keras.models.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_vector_classification_shared_sequential(self):
# Test that Sequential models that feature internal updates
# and internal losses can be shared.
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
base_model = keras.models.Sequential([
keras.layers.Dense(16,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-5),
bias_regularizer=keras.regularizers.l2(1e-5),
input_shape=x_train.shape[1:]),
keras.layers.BatchNormalization(),
])
x = keras.layers.Input(x_train.shape[1:])
y = base_model(x)
y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
model = keras.models.Model(x, y)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
self.assertEqual(len(model.losses), 2)
self.assertEqual(len(model.updates), 2)
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_vector_classification_shared_model(self):
# Test that functional models that feature internal updates
# and internal losses can be shared.
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.layers.Input(x_train.shape[1:])
x = keras.layers.Dense(16,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-5),
bias_regularizer=keras.regularizers.l2(1e-5),
input_shape=x_train.shape[1:])(inputs)
x = keras.layers.BatchNormalization()(x)
base_model = keras.models.Model(inputs, x)
x = keras.layers.Input(x_train.shape[1:])
y = base_model(x)
y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
model = keras.models.Model(x, y)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
def test_embedding_with_clipnorm(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Embedding(input_dim=1, output_dim=1))
model.compile(optimizer=keras.optimizers.SGD(clipnorm=0.1), loss='mse')
model.fit(np.array([[0]]), np.array([[[0.5]]]), epochs=1)
def test_using_tf_layers_in_keras_sequential_model(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
model = keras.models.Sequential()
model.add(tf_core_layers.Dense(32, activation=nn.relu, input_shape=(10,)))
model.add(tf_core_layers.Dense(2, activation=nn.softmax))
model.summary()
y_train = keras.utils.to_categorical(y_train)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=0)
self.assertGreater(history.history['val_acc'][-1], 0.7)
def test_using_tf_layers_in_keras_functional_model(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.Input(shape=(10,))
x = tf_core_layers.Dense(32, activation=nn.relu)(inputs)
outputs = tf_core_layers.Dense(2, activation=nn.softmax)(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=0)
self.assertGreater(history.history['val_acc'][-1], 0.7)
if __name__ == '__main__':
test.main()
|
apache-2.0
| -2,980,240,026,002,302,500
| 40.409231
| 80
| 0.588572
| false
| 3.700302
| true
| false
| false
|
g2p/SimpleTAL
|
examples/elementtree-example/basic-example.py
|
1
|
2377
|
#!/usr/bin/python
""" Example TAL program
Copyright (c) 2009 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
As simple as it gets:
1 - Create a context
2 - Compile a template
3 - Expand the template
Module Dependencies: simpleTAL, simpleTALES
"""
from simpletal import simpleTAL, simpleTALES, simpleElementTree
import sys, logging
logging.basicConfig()
xmlTree = simpleElementTree.parseFile (file="input.xml")
# Create the context that is used by the template
context = simpleTALES.Context(allowPythonPath=1)
# Add the XML element tree to the context
context.addGlobal ("input", xmlTree)
# Open the template file
templateFile = open ("basic.xml", 'rb')
# Compile a template
template = simpleTAL.compileXMLTemplate (templateFile)
# Close the template file
templateFile.close()
# Expand the template as HTML using this context
template.expand (context, sys.stdout, "utf-8")
|
bsd-3-clause
| 628,443,351,640,474,600
| 37.967213
| 75
| 0.766933
| false
| 4.214539
| false
| false
| false
|
techtonik/warehouse
|
tests/accounts/test_db.py
|
1
|
1989
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import datetime
import mock
from warehouse.accounts.tables import users, emails
def test_get_user(dbapp):
dbapp.engine.execute(users.insert().values(
password="!",
username="test-user",
name="Test User",
last_login=datetime.datetime.utcnow(),
is_active=True,
is_superuser=False,
is_staff=False,
))
assert {
"date_joined": mock.ANY,
"email": None,
"name": "Test User",
"username": "test-user",
} == dbapp.db.accounts.get_user("test-user")
def test_get_user_with_email(dbapp):
dbapp.engine.execute(users.insert().values(
id=1,
password="!",
username="test-user",
name="Test User",
last_login=datetime.datetime.utcnow(),
is_active=True,
is_superuser=False,
is_staff=False,
))
dbapp.engine.execute(emails.insert().values(
user_id=1,
email="test-user@example.com",
primary=True,
verified=True,
))
assert {
"date_joined": mock.ANY,
"email": "test-user@example.com",
"name": "Test User",
"username": "test-user",
} == dbapp.db.accounts.get_user("test-user")
def test_get_user_missing(dbapp):
assert dbapp.db.accounts.get_user("test-user") is None
|
apache-2.0
| 6,569,239,910,698,849,000
| 27.414286
| 74
| 0.641528
| false
| 3.697026
| true
| false
| false
|
CiscoSystems/nova
|
nova/tests/test_notifications.py
|
1
|
13358
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for common notifications."""
import copy
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.network import api as network_api
from nova import notifications
from nova import test
from nova.tests import fake_network
from nova.tests import fake_notifier
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class NotificationsTestCase(test.TestCase):
def setUp(self):
super(NotificationsTestCase, self).setUp()
self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
1)
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return self.net_info
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
fake_network.set_stub_network_methods(self.stubs)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.flags(compute_driver='nova.virt.fake.FakeDriver',
network_manager='nova.network.manager.FlatManager',
notify_on_state_change="vm_and_task_state",
host='testhost')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance = self._wrapped_create()
def _wrapped_create(self, params=None):
instance_type = flavors.get_flavor_by_name('m1.tiny')
sys_meta = flavors.save_flavor_info({}, instance_type)
inst = {}
inst['image_ref'] = 1
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = instance_type['id']
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['access_ip_v4'] = '1.2.3.4'
inst['access_ip_v6'] = 'feed:5eed'
inst['display_name'] = 'test_instance'
inst['hostname'] = 'test_instance_hostname'
inst['node'] = 'test_instance_node'
inst['system_metadata'] = sys_meta
if params:
inst.update(params)
return db.instance_create(self.context, inst)
def test_send_api_fault_disabled(self):
self.flags(notify_api_faults=False)
notifications.send_api_fault("http://example.com/foo", 500, None)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_send_api_fault(self):
self.flags(notify_api_faults=True)
exception = None
try:
# Get a real exception with a call stack.
raise test.TestingException("junk")
except test.TestingException as e:
exception = e
notifications.send_api_fault("http://example.com/foo", 500, exception)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
n = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(n.priority, 'ERROR')
self.assertEqual(n.event_type, 'api.fault')
self.assertEqual(n.payload['url'], 'http://example.com/foo')
self.assertEqual(n.payload['status'], 500)
self.assertIsNotNone(n.payload['exception'])
def test_notif_disabled(self):
# test config disable of the notifications
self.flags(notify_on_state_change=None)
old = copy.copy(self.instance)
self.instance["vm_state"] = vm_states.ACTIVE
old_vm_state = old['vm_state']
new_vm_state = self.instance["vm_state"]
old_task_state = old['task_state']
new_task_state = self.instance["task_state"]
notifications.send_update_with_states(self.context, self.instance,
old_vm_state, new_vm_state, old_task_state, new_task_state,
verify_states=True)
notifications.send_update(self.context, old, self.instance)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_task_notif(self):
# test config disable of just the task state notifications
self.flags(notify_on_state_change="vm_state")
        # we should not get a notification on task state change now
old = copy.copy(self.instance)
self.instance["task_state"] = task_states.SPAWNING
old_vm_state = old['vm_state']
new_vm_state = self.instance["vm_state"]
old_task_state = old['task_state']
new_task_state = self.instance["task_state"]
notifications.send_update_with_states(self.context, self.instance,
old_vm_state, new_vm_state, old_task_state, new_task_state,
verify_states=True)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
# ok now enable task state notifications and re-try
self.flags(notify_on_state_change="vm_and_task_state")
notifications.send_update(self.context, old, self.instance)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_send_no_notif(self):
# test notification on send no initial vm state:
old_vm_state = self.instance['vm_state']
new_vm_state = self.instance['vm_state']
old_task_state = self.instance['task_state']
new_task_state = self.instance['task_state']
notifications.send_update_with_states(self.context, self.instance,
old_vm_state, new_vm_state, old_task_state, new_task_state,
service="compute", host=None, verify_states=True)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_send_on_vm_change(self):
# pretend we just transitioned to ACTIVE:
params = {"vm_state": vm_states.ACTIVE}
(old_ref, new_ref) = db.instance_update_and_get_original(self.context,
self.instance['uuid'], params)
notifications.send_update(self.context, old_ref, new_ref)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_send_on_task_change(self):
# pretend we just transitioned to task SPAWNING:
params = {"task_state": task_states.SPAWNING}
(old_ref, new_ref) = db.instance_update_and_get_original(self.context,
self.instance['uuid'], params)
notifications.send_update(self.context, old_ref, new_ref)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_no_update_with_states(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
task_states.SPAWNING, verify_states=True)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_vm_update_with_states(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
task_states.SPAWNING, verify_states=True)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
display_name = self.instance["display_name"]
hostname = self.instance["hostname"]
node = self.instance["node"]
self.assertEqual(vm_states.BUILDING, payload["old_state"])
self.assertEqual(vm_states.ACTIVE, payload["state"])
self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
self.assertEqual(task_states.SPAWNING, payload["new_task_state"])
self.assertEqual(payload["access_ip_v4"], access_ip_v4)
self.assertEqual(payload["access_ip_v6"], access_ip_v6)
self.assertEqual(payload["display_name"], display_name)
self.assertEqual(payload["hostname"], hostname)
self.assertEqual(payload["node"], node)
def test_task_update_with_states(self):
self.flags(notify_on_state_change="vm_and_task_state")
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None, verify_states=True)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
display_name = self.instance["display_name"]
hostname = self.instance["hostname"]
self.assertEqual(vm_states.BUILDING, payload["old_state"])
self.assertEqual(vm_states.BUILDING, payload["state"])
self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
self.assertIsNone(payload["new_task_state"])
self.assertEqual(payload["access_ip_v4"], access_ip_v4)
self.assertEqual(payload["access_ip_v6"], access_ip_v6)
self.assertEqual(payload["display_name"], display_name)
self.assertEqual(payload["hostname"], hostname)
def test_update_no_service_name(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
# service name should default to 'compute'
notif = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('compute.testhost', notif.publisher_id)
def test_update_with_service_name(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None, service="testservice")
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        # the explicitly passed service name should be used instead of the 'compute' default
notif = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('testservice.testhost', notif.publisher_id)
def test_update_with_host_name(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None, host="someotherhost")
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        # the explicitly passed host should be used with the default 'compute' service
notif = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('compute.someotherhost', notif.publisher_id)
def test_payload_has_fixed_ip_labels(self):
info = notifications.info_from_instance(self.context, self.instance,
self.net_info, None)
self.assertIn("fixed_ips", info)
self.assertEqual(info["fixed_ips"][0]["label"], "test1")
def test_send_access_ip_update(self):
notifications.send_update(self.context, self.instance, self.instance)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
self.assertEqual(payload["access_ip_v4"], access_ip_v4)
self.assertEqual(payload["access_ip_v6"], access_ip_v6)
def test_send_name_update(self):
param = {"display_name": "new_display_name"}
new_name_inst = self._wrapped_create(params=param)
notifications.send_update(self.context, self.instance, new_name_inst)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
old_display_name = self.instance["display_name"]
new_display_name = new_name_inst["display_name"]
self.assertEqual(payload["old_display_name"], old_display_name)
self.assertEqual(payload["display_name"], new_display_name)
def test_send_no_state_change(self):
called = [False]
def sending_no_state_change(context, instance, **kwargs):
called[0] = True
self.stubs.Set(notifications, '_send_instance_update_notification',
sending_no_state_change)
notifications.send_update(self.context, self.instance, self.instance)
self.assertTrue(called[0])
def test_fail_sending_update(self):
def fail_sending(context, instance, **kwargs):
raise Exception('failed to notify')
self.stubs.Set(notifications, '_send_instance_update_notification',
fail_sending)
notifications.send_update(self.context, self.instance, self.instance)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
|
apache-2.0
| -8,300,314,221,059,833,000
| 40.484472
| 78
| 0.64613
| false
| 3.756468
| true
| false
| false
|
DataDog/integrations-extras
|
storm/tests/conftest.py
|
1
|
1254
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import socket
import pytest
from datadog_checks.dev import docker_run, get_here, run_command
from datadog_checks.dev.conditions import WaitFor
from .common import HOST, INSTANCE
def wait_for_thrift():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, 6627))
sock.close()
@pytest.fixture(scope='session')
def dd_environment():
compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yaml')
# Build the topology jar to use in the environment
with docker_run(compose_file, build=True, service_name='topology-maker', sleep=15):
run_command(['docker', 'cp', 'topology-build:/topology.jar', os.path.join(get_here(), 'compose')])
nimbus_condition = WaitFor(wait_for_thrift)
with docker_run(compose_file, service_name='storm-nimbus', conditions=[nimbus_condition]):
with docker_run(compose_file, service_name='storm-ui', log_patterns=[r'org.apache.storm.ui.core']):
with docker_run(
compose_file, service_name='topology', log_patterns=['Finished submitting topology: topology']
):
yield INSTANCE
|
bsd-3-clause
| -365,325,387,029,298,400
| 35.882353
| 110
| 0.69059
| false
| 3.593123
| false
| false
| false
|
Suwmlee/XX-Net
|
Python3/lib/socket.py
|
1
|
27859
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io, selectors
from enum import IntEnum
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection",
"AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
'AddressFamily',
__name__,
lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert(
'SocketKind',
__name__,
lambda C: C.isupper() and C.startswith('SOCK_'))
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class _GiveupOnSendfile(Exception): pass
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
# For user code address family and type values are IntEnum members, but
# for the underlying _socket.socket they're just integers. The
# constructor of _socket.socket converts the given argument to an
# integer automatically.
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name and socket
address(es).
"""
closed = getattr(self, '_closed', False)
s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
% (self.__class__.__module__,
self.__class__.__qualname__,
" [closed]" if closed else "",
self.fileno(),
self.family,
self.type,
self.proto)
if not closed:
try:
laddr = self.getsockname()
if laddr:
s += ", laddr=%s" % str(laddr)
except error:
pass
try:
raddr = self.getpeername()
if raddr:
s += ", raddr=%s" % str(raddr)
except error:
pass
s += '>'
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Duplicate the socket. Return a new socket object connected to the same
system resource. The new socket is non-inheritable.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
if hasattr(os, 'sendfile'):
def _sendfile_use_sendfile(self, file, offset=0, count=None):
self._check_sendfile_params(file, offset, count)
sockno = self.fileno()
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise _GiveupOnSendfile(err) # not a regular file
try:
fsize = os.fstat(fileno).st_size
            except OSError as err:
raise _GiveupOnSendfile(err) # not a regular file
if not fsize:
return 0 # empty file
blocksize = fsize if not count else count
timeout = self.gettimeout()
if timeout == 0:
raise ValueError("non-blocking sockets are not supported")
# poll/select have the advantage of not requiring any
# extra file descriptor, contrarily to epoll/kqueue
# (also, they require a single syscall).
if hasattr(selectors, 'PollSelector'):
selector = selectors.PollSelector()
else:
selector = selectors.SelectSelector()
selector.register(sockno, selectors.EVENT_WRITE)
total_sent = 0
# localize variable access to minimize overhead
selector_select = selector.select
os_sendfile = os.sendfile
try:
while True:
if timeout and not selector_select(timeout):
raise _socket.timeout('timed out')
if count:
blocksize = count - total_sent
if blocksize <= 0:
break
try:
sent = os_sendfile(sockno, fileno, offset, blocksize)
except BlockingIOError:
if not timeout:
# Block until the socket is ready to send some
# data; avoids hogging CPU resources.
selector_select()
continue
except OSError as err:
if total_sent == 0:
# We can get here for different reasons, the main
# one being 'file' is not a regular mmap(2)-like
# file, in which case we'll fall back on using
# plain send().
raise _GiveupOnSendfile(err)
raise err from None
else:
if sent == 0:
break # EOF
offset += sent
total_sent += sent
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset)
else:
def _sendfile_use_sendfile(self, file, offset=0, count=None):
raise _GiveupOnSendfile(
"os.sendfile() not available on this platform")
def _sendfile_use_send(self, file, offset=0, count=None):
self._check_sendfile_params(file, offset, count)
if self.gettimeout() == 0:
raise ValueError("non-blocking sockets are not supported")
if offset:
file.seek(offset)
blocksize = min(count, 8192) if count else 8192
total_sent = 0
# localize variable access to minimize overhead
file_read = file.read
sock_send = self.send
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
data = memoryview(file_read(blocksize))
if not data:
break # EOF
while True:
try:
sent = sock_send(data)
except BlockingIOError:
continue
else:
total_sent += sent
if sent < len(data):
data = data[sent:]
else:
break
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not self.type & SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
def sendfile(self, file, offset=0, count=None):
"""sendfile(file[, offset[, count]]) -> sent
Send a file until EOF is reached by using high-performance
os.sendfile() and return the total number of bytes which
were sent.
*file* must be a regular file object opened in binary mode.
If os.sendfile() is not available (e.g. Windows) or file is
        not a regular file, socket.send() will be used instead.
*offset* tells from where to start reading the file.
If specified, *count* is the total number of bytes to transmit
as opposed to sending the file until EOF is reached.
File position is updated on return or also in case of error in
which case file.tell() can be used to figure out the number of
bytes which were sent.
The socket must be of SOCK_STREAM type.
Non-blocking sockets are not supported.
"""
try:
return self._sendfile_use_sendfile(file, offset, count)
except _GiveupOnSendfile:
return self._sendfile_use_send(file, offset, count)
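    # Illustrative usage sketch (not part of the original module): streaming a
    # regular file over a connected SOCK_STREAM socket 'conn'; the file name is
    # a placeholder.
    #
    #   with open('payload.bin', 'rb') as f:
    #       sent = conn.sendfile(f)            # os.sendfile() when available, send() otherwise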
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
@property
def family(self):
"""Read-only access to the address family for this socket.
"""
return _intenum_converter(super().family, AddressFamily)
@property
def type(self):
"""Read-only access to the socket type.
"""
return _intenum_converter(super().type, SocketKind)
if os.name == 'nt':
def get_inheritable(self):
return os.get_handle_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_handle_inheritable(self.fileno(), inheritable)
else:
def get_inheritable(self):
return os.get_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_inheritable(self.fileno(), inheritable)
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
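# Illustrative sketch (not part of the original module, never called): fromfd()
# builds an independent socket object on a duplicate of an existing descriptor.
def _example_fromfd_usage():
    srv = socket(AF_INET, SOCK_STREAM)
    srv.bind(('127.0.0.1', 0))
    srv.listen()
    twin = fromfd(srv.fileno(), AF_INET, SOCK_STREAM)  # works on a dup'ed fd
    # 'twin' and 'srv' refer to the same kernel socket through different fds.
    twin.close()
    srv.close()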
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
__all__.append("fromshare")
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
else:
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
if family == AF_INET:
host = _LOCALHOST
elif family == AF_INET6:
host = _LOCALHOST_V6
else:
raise ValueError("Only AF_INET and AF_INET6 socket address families "
"are supported")
if type != SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with
# setblocking(False) that prevents us from having to create a thread.
lsock = socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen()
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't used read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise OSError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
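# Illustrative sketch (not part of the original module, never called): a
# SocketIO object is what makefile() returns when buffering is disabled; it is
# the raw layer the buffered wrappers are stacked on.
def _example_socketio_usage(sock):
    raw = sock.makefile('rwb', buffering=0)  # returns a SocketIO instance
    raw.write(b'hello')
    reply = raw.read(5)
    raw.close()  # closes the SocketIO object, not the underlying socket
    return reply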
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
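# Illustrative sketch (not part of the original module, never called): the
# result of getfqdn() depends on the local resolver configuration.
def _example_getfqdn_usage():
    print(getfqdn())             # fully qualified name of the local host
    print(getfqdn('localhost'))  # usually unchanged unless a dotted alias exists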
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
"""
# We override this function since we want to translate the numeric family
# and socket type values to enum constants.
addrlist = []
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
af, socktype, proto, canonname, sa = res
addrlist.append((_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa))
return addrlist
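# Illustrative sketch (not part of the original module, never called):
# getaddrinfo() yields one 5-tuple per candidate address, with the family and
# type fields already converted to enum members; 'example.com' is a placeholder.
def _example_getaddrinfo_usage():
    for family, kind, proto, canonname, sockaddr in getaddrinfo(
            'example.com', 'http', type=SOCK_STREAM):
        print(family, kind, sockaddr)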
|
bsd-2-clause
| -947,895,180,707,628,000
| 35.800543
| 95
| 0.56786
| false
| 4.417156
| false
| false
| false
|
Heappl/scripts
|
context.py
|
1
|
3041
|
#!/usr/bin/python3
def parse_commandline_options():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", "--stack", action='store_true', dest="stack", help="produces stack trace for each running component")
parser.add_option("-l", "--last_line", type='int', dest="line", help="prints n last lines for each running component")
return parser.parse_args()
(options, args) = parse_commandline_options()
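# Events come in start/end pairs: "fnen" and "cost" open a frame, and
# getEndingFor() below maps each of them to its closing event ("fnlv"/"cofi").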
def isStartingEvent(event):
return event in ["fnen", "cost"]
def isEndingEvent(event):
return event in ["fnlv", "cofi"]
def interesting(line):
tokens = line.split("|")
if (len(tokens) < 2):
return False
return isStartingEvent(tokens[1]) or isEndingEvent(tokens[1])
def getEndingFor(event):
events = {"fnen" : "fnlv", "cost" : "cofi"}
return events.get(event, "invalid")
def getStackDescriptionFor(event):
events = {"fnlv" : "->", "cofi" : "component"}
return events.get(event, "")
def getEvent(line):
tokens = line.split("|")
return tokens[1]
def getEventData(line):
tokens = line.split("|")
if (len(tokens) < 2):
return (None, None)
if (tokens[1] in ["fnen", "fnlv"]):
return (tokens[1], tokens[3].split("(")[0])
return (tokens[1], tokens[2].split("=")[0])
def getThreadName(line):
tokens = line.split("|")
if (len(tokens) < 3):
return ""
threadTokens = tokens[2].split("=")
if (threadTokens[0] == "?"):
return threadTokens[1]
return threadTokens[0]
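# Group the raw log lines by the thread name carried in their third field.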
def splitPerThread(content):
ret = {}
for line in content:
name = getThreadName(line)
if (len(name) == 0):
continue
threadLog = ret.get(name, [])
threadLog.append(line)
ret[name] = threadLog
return ret
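# Replay one thread's start/end events against a stack; whatever is still open
# at the end of the log is printed as that thread's current call chain.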
def generateStackForSingleThread(threadName, logs):
logs = [line for line in logs if interesting(line)]
stack = []
for line in logs:
(event, ident) = getEventData(line)
if isEndingEvent(event):
(topEvent, topIdent) = stack.pop()
if (topEvent != event) or (topIdent != ident):
print("ERROR: wrong ending event encountered (expected:{" + topEvent + "," + topIdent + "}" +
", seen:{" + event + "," + ident + "})")
else:
stack.append((getEndingFor(event), ident))
if (len(stack) > 0):
for (event, name) in stack:
print(getStackDescriptionFor(event), name)
for filepath in args:
perThreadLogs = splitPerThread(open(filepath).read().split("\n")[:-1])
if (options.stack):
for key in perThreadLogs.keys():
generateStackForSingleThread(key, perThreadLogs[key])
if (options.line):
for key in perThreadLogs.keys():
if getEvent(perThreadLogs[key][-1]) == 'cofi':
continue
for i in range(1, options.line + 1):
if len(perThreadLogs[key]) >= i:
print(perThreadLogs[key][-i])
print("\n")
|
gpl-2.0
| 6,712,782,048,356,157,000
| 32.788889
| 130
| 0.585334
| false
| 3.611639
| false
| false
| false
|