Dataset schema. Each record below begins with a line giving `repo_name | path | copies | size |`, followed by the raw file `content`, and ends with a line giving the remaining fields `| license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |`.

| column | type | values / range |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 distinct values |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 distinct values |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 distinct value |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 distinct values |
| has_no_keywords | bool | 2 distinct values |
| few_assignments | bool | 1 distinct value |
google-research/google-research | single_view_mpi/libs/geometry.py | 1 | 23328 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Geometry utilities.
In these functions:
* Shapes are known statically. Exception: functions dealing with
points lists, whose length is data-dependent.
* Where possible, utility functions operate on the last one or two
dimensions of their inputs, and will function irrespective of how many
preceding dimensions are present. Where it makes sense, functions support
broadcasting on the part of the shape preceding the fixed dimensions.
This is to allow preceding dimensions to freely be used for batching or
other purposes.
* Camera poses are represented as 3x4 matrices (consisting of a 3x3 rotation
matrix and a 3-coordinate translation vector):
[[ r r r tx ]
[ r r r ty ]
[ r r r tz ]]
The matrix maps a position in world-space into a position relative to the
camera position. (Conventionally, the camera position has the Z axis pointing
into the screen and the Y axis pointing down.) Functions to manipulate such
matrices have names beginning "mat34_".
* Camera intrinsics are represented as a tensor of last dimension 4. The four
elements are fx, fy (focal length) and cx, cy (principal point). Intrinsics
are independent of image size; they are expressed as if the image runs from
(0,0) to (1,1). So typically cx == cy == 0.5, and for a 90-degree field of
view, fx == 0.5.
* Points (whether 2D or 3D) are represented using the last axis of a tensor.
A set of N 3D points would have shape [N, 3].
* Planes in 3D are represented as 4-vectors. A point x is on the plane p exactly
when p.x == 0.
* We use texture coordinates to represent points in an image. They go from (0,0)
in the top-left corner of an image to (1,1) in the bottom right. It is
convenient to work with these coordinates rather than counts of pixels,
because they are resolution-independent.
This file is organised in the following sections:
MATRICES, PLANES, POINTS
– basic 3D geometry operations.
CAMERAS
– intrinsics, projection, camera-relative points.
IMAGES AND SAMPLING
– bilinear-sampling from images.
WARPS AND HOMOGRAPHIES
– plane sweep, homography, flow warping, depth warping.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from single_view_mpi.libs import utils
# ========== MATRICES, PLANES, POINTS ==========
def check_input_shape(name, tensor, axis, value):
"""Utility function for checking tensor shapes."""
shape = tensor.shape.as_list()
if shape[axis] != value:
raise ValueError('Input "%s": dimension %d should be %s. Shape = %s' %
(name, axis, value, shape))
def check_input_m34(name, tensor):
check_input_shape(name, tensor, -1, 4)
check_input_shape(name, tensor, -2, 3)
@utils.name_scope
def broadcasting_matmul(a, b, **kwargs):
(a, b) = utils.broadcast_to_match(a, b, ignore_axes=2)
return tf.matmul(a, b, **kwargs)
def mat34_to_mat44(matrix):
"""Converts 3x4 matrices to 4x4 matrices by adding filler.
Considering the last two dimensions of the input tensor, where m
indicates a matrix coefficient and t a matrix coefficient for translation,
this function does the following:
[[m, m, m, t], [[m, m, m, t],
[m, m, m, t], ===> [m, m, m, t],
[m, m, m, t]] [m, m, m, t],
[0, 0, 0, 1]]
Args:
matrix: [..., 3, 4] matrix
Returns:
A [..., 4, 4] tensor with an extra row [0, 0, 0, 1] added to each matrix.
Dimensions other than the last two are the same as for the input.
Raises:
ValueError: if input has incompatible shape.
"""
shape = matrix.shape.as_list()
check_input_m34('matrix', matrix)
extra_dims = shape[:-2]
filler = tf.constant([0.0, 0.0, 0.0, 1.0],
shape=len(extra_dims) * [1] + [1, 4])
filler = tf.tile(filler, extra_dims + [1, 1])
return tf.concat([matrix, filler], axis=-2)
def mat33_to_mat44(matrix):
"""Converts 3x3 matrices to 4x4 by adding zero translation and filler.
Considering the last two dimensions of the input tensor, where m indicates
a matrix entry, this function does the following:
[[m, m, m], [[m, m, m, 0],
[m, m, m], ===> [m, m, m, 0],
[m, m, m]] [m, m, m, 0],
[0, 0, 0, 1]]
Args:
matrix: A [..., 3, 3] tensor.
Returns:
A [..., 4, 4] matrix tensor. Dimensions other than the last two are
the same as for the input matrix.
Raises:
ValueError: if input has incompatible shape.
"""
shape = matrix.shape.as_list()
check_input_shape('matrix', matrix, -1, 3)
check_input_shape('matrix', matrix, -2, 3)
extra_dims = shape[:-2]
zeros = tf.zeros(extra_dims + [3, 1], dtype=matrix.dtype)
return mat34_to_mat44(tf.concat([matrix, zeros], axis=-1))
@utils.name_scope
def mat34_product(a, b):
"""Returns the product of a and b, 3x4 matrices.
Args:
a: [..., 3, 4] matrix
b: [..., 3, 4] matrix
Returns:
The product ab. The product is computed as if we added an extra row
[0, 0, 0, 1] to each matrix, multiplied them, and then removed the extra
row. The shapes of a and b must match, either directly or via
broadcasting.
Raises:
ValueError: if a or b are not 3x4 matrices.
"""
check_input_m34('a', a)
check_input_m34('b', b)
(a, b) = utils.broadcast_to_match(a, b, ignore_axes=2)
# Split translation part off from the rest
a33, a_translate = tf.split(a, [3, 1], axis=-1)
b33, b_translate = tf.split(b, [3, 1], axis=-1)
# Compute parts of the product
ab33 = tf.matmul(a33, b33)
ab_translate = a_translate + tf.matmul(a33, b_translate)
# Assemble
return tf.concat([ab33, ab_translate], axis=-1)
@utils.name_scope
def mat34_transform(m, v):
"""Transform a set of 3d points by a 3x4 pose matrix.
Args:
m: [..., 3, 4] matrix
v: [..., N, 3] set of N 3d points.
Returns:
The transformed points mv. The transform is computed as if we added an
extra coefficient with value 1.0 to each point, performed a matrix
multiplication, and removed the extra coefficient again. The parts of the
shape indicated by "..." must match, either directly or via broadcasting.
Raises:
ValueError: if inputs are the wrong shape.
"""
check_input_m34('m', m)
check_input_shape('v', v, -1, 3)
(m, v) = utils.broadcast_to_match(m, v, ignore_axes=2)
rotation = m[Ellipsis, :3]
# See b/116203395 for why I didn't do the next two lines together as
# translation = m[..., tf.newaxis, :, 3].
translation = m[Ellipsis, 3]
translation = translation[Ellipsis, tf.newaxis, :] # Now shape is [..., 1, 3].
# Points are stored as (N * 3) rather than (3 * N), so multiply in reverse
# rather than transposing them.
return tf.matmul(v, rotation, transpose_b=True) + translation
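# Editor's note: the small example below is an illustration added in this
# edited copy; it is not part of the original geometry.py.
def _example_mat34_transform():
  """Sketch: an identity rotation plus a translation of (1, 2, 3).

  Under the conventions above, mat34_transform should simply offset each
  point, so the expected result is [[1, 2, 3], [2, 3, 4]].
  """
  pose = tf.constant([[1.0, 0.0, 0.0, 1.0],
                      [0.0, 1.0, 0.0, 2.0],
                      [0.0, 0.0, 1.0, 3.0]])
  points = tf.constant([[0.0, 0.0, 0.0],
                        [1.0, 1.0, 1.0]])
  return mat34_transform(pose, points)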
@utils.name_scope
def mat34_transform_planes(m, p):
"""Transform a set of 3d planes by a 3x4 pose matrix.
Args:
m: [..., 3, 4] matrix, from source space to target space
p: [..., N, 4] set of N planes in source space.
Returns:
The transformed planes p' in target space.
If point x is on the plane p, then point Mx is on the plane p'. The parts of
the shape indicated by "..." must match either directly or via broadcasting.
Raises:
ValueError: if inputs are the wrong shape.
"""
check_input_m34('m', m)
check_input_shape('p', p, -1, 4)
(m, p) = utils.broadcast_to_match(m, p, ignore_axes=2)
# If x is on the plane p, then p . x = 0. We want to find p' such that
# p' . (M x) = 0. Writing T for transpose and i for inverse, this gives us
# p'T M x = 0, so p'T = pT Mi.
# Planes are stored as (N * 4) rather than (4 * N), i.e. pT rather than p, so
# we can use this directly to compute p'T:
return tf.matmul(p, mat34_to_mat44(mat34_pose_inverse(m)))
@utils.name_scope
def mat34_pose_inverse(matrix):
"""Invert a 3x4 matrix.
Args:
matrix: [..., 3, 4] matrix where [..., 3, 3] is a rotation matrix
Returns:
The inverse matrix, of the same shape as the input. It is computed as
if we added an extra row with values [0, 0, 0, 1], inverted the
matrix, and removed the row again.
Raises:
ValueError: if input is not a 3x4 matrix.
"""
check_input_m34('matrix', matrix)
rest, translation = tf.split(matrix, [3, 1], axis=-1)
inverse = tf.linalg.matrix_transpose(rest)
inverse_translation = -tf.matmul(inverse, translation)
return tf.concat([inverse, inverse_translation], axis=-1)
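# Editor's note: illustration added in this edited copy, not in the original.
def _example_mat34_pose_inverse():
  """Sketch: composing a pose with its inverse gives the identity pose.

  The rotation part here is a 90-degree rotation about z; the expected result
  is [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]] (up to float rounding).
  """
  pose = tf.constant([[0.0, -1.0, 0.0, 4.0],
                      [1.0, 0.0, 0.0, 5.0],
                      [0.0, 0.0, 1.0, 6.0]])
  return mat34_product(pose, mat34_pose_inverse(pose))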
@utils.name_scope
def build_matrix(elements):
"""Stacks elements along two axes to make a tensor of matrices.
Args:
elements: [n, m] matrix of tensors, each with shape [...].
Returns:
[..., n, m] tensor of matrices, resulting from concatenating
the individual tensors.
"""
rows = [tf.stack(row_elements, axis=-1) for row_elements in elements]
return tf.stack(rows, axis=-2)
@utils.name_scope
def pose_from_6dof(vec):
"""Converts vector containing 6DoF pose parameters to pose matrices.
Args:
vec: [..., 6] parameters in the order tx, ty, tz, rx, ry, rz. rx, ry and rz
are Euler angles in radians. Rotation is first by z, then by y, then by x,
and translation happens last. Each rotation is counterclockwise about its
axis.
Returns:
rigid world-to-camera transformation matrix [..., 3, 4] corresponding
to the input. Rotation angles are clamped to +/- π before conversion.
"""
check_input_shape('vec', vec, -1, 6)
shape = vec.shape.as_list()
extra_dims = shape[:-1]
# Get translation as [..., 3] and rx, ry, rz each as [..., 1].
translation, rx, ry, rz = tf.split(vec, [3, 1, 1, 1], -1)
rx = tf.squeeze(tf.clip_by_value(rx, -math.pi, math.pi), axis=-1)
ry = tf.squeeze(tf.clip_by_value(ry, -math.pi, math.pi), axis=-1)
rz = tf.squeeze(tf.clip_by_value(rz, -math.pi, math.pi), axis=-1)
cos_x = tf.cos(rx)
sin_x = tf.sin(rx)
cos_y = tf.cos(ry)
sin_y = tf.sin(ry)
cos_z = tf.cos(rz)
sin_z = tf.sin(rz)
zero = tf.zeros(extra_dims)
one = tf.ones(extra_dims)
rotate_z = build_matrix([[cos_z, -sin_z, zero], [sin_z, cos_z, zero],
[zero, zero, one]])
rotate_y = build_matrix([[cos_y, zero, sin_y], [zero, one, zero],
[-sin_y, zero, cos_y]])
rotate_x = build_matrix([[one, zero, zero], [zero, cos_x, -sin_x],
[zero, sin_x, cos_x]])
rotation = tf.matmul(tf.matmul(rotate_x, rotate_y), rotate_z)
pose = tf.concat([rotation, translation[Ellipsis, tf.newaxis]], axis=-1)
return pose
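# Editor's note: illustration added in this edited copy, not in the original.
def _example_pose_from_6dof():
  """Sketch: a yaw-only pose (rotation of pi/2 about z, zero translation).

  The left 3x3 block of the returned [1, 3, 4] pose should be approximately
  [[0, -1, 0], [1, 0, 0], [0, 0, 1]].
  """
  vec = tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, math.pi / 2.0]])
  return pose_from_6dof(vec)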
# ========== CAMERAS ==========
@utils.name_scope
def intrinsics_matrix(intrinsics):
"""Make a matrix mapping camera space to homogeneous texture coords.
Args:
intrinsics: [..., 4] intrinsics. Last dimension (fx, fy, cx, cy)
Returns:
[..., 3, 3] matrix mapping camera space to image space.
"""
fx = intrinsics[Ellipsis, 0]
fy = intrinsics[Ellipsis, 1]
cx = intrinsics[Ellipsis, 2]
cy = intrinsics[Ellipsis, 3]
zero = tf.zeros_like(fx)
one = tf.ones_like(fx)
return build_matrix(
[[fx, zero, cx], [zero, fy, cy], [zero, zero, one]])
@utils.name_scope
def inverse_intrinsics_matrix(intrinsics):
"""Return the inverse of the intrinsics matrix..
Args:
intrinsics: [..., 4] intrinsics. Last dimension (fx, fy, cx, cy)
Returns:
[..., 3, 3] matrix mapping homogeneous texture coords to camera space.
"""
fxi = 1.0 / intrinsics[Ellipsis, 0]
fyi = 1.0 / intrinsics[Ellipsis, 1]
cx = intrinsics[Ellipsis, 2]
cy = intrinsics[Ellipsis, 3]
zero = tf.zeros_like(cx)
one = tf.ones_like(cx)
return build_matrix(
[[fxi, zero, -cx * fxi], [zero, fyi, -cy * fyi], [zero, zero, one]])
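# Editor's note: illustration added in this edited copy, not in the original.
def _example_intrinsics_roundtrip():
  """Sketch: intrinsics_matrix times inverse_intrinsics_matrix is identity.

  Uses the 90-degree-field-of-view intrinsics (0.5, 0.5, 0.5, 0.5) described
  in the module docstring; the product should be the 3x3 identity.
  """
  intrinsics = tf.constant([0.5, 0.5, 0.5, 0.5])
  return tf.matmul(intrinsics_matrix(intrinsics),
                   inverse_intrinsics_matrix(intrinsics))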
@utils.name_scope
def homogenize(coords):
"""Convert (x, y) to (x, y, 1), or (x, y, z) to (x, y, z, 1)."""
ones = tf.ones_like(coords[Ellipsis, :1])
return tf.concat([coords, ones], axis=-1)
@utils.name_scope
def dehomogenize(coords):
"""Convert (x, y, w) to (x/w, y/w) or (x, y, z, w) to (x/w, y/w, z/w)."""
return tf.math.divide_no_nan(coords[Ellipsis, :-1], coords[Ellipsis, -1:])
@utils.name_scope
def texture_to_camera_coordinates(coords, intrinsics):
"""Convert texture coordinates to x,y,1 coordinates relative to camera.
Args:
coords: [..., 2] texture coordinates
intrinsics: [..., 4] (resolution-independent) camera intrinsics. Last
dimension (fx, fy, cx, cy).
Returns:
[..., 3] coordinates, transformed by scaling down by image size and
applying the inverse of the intrinsics. z-coordinates are all 1.
Raises:
ValueError: if coords is the wrong shape.
"""
check_input_shape('coords', coords, -1, 2)
# Shift to optical center and divide by focal length.
# (These are element-wise operations on the x and y coords.)
focal_length, optical_center = tf.split(intrinsics, [2, 2], axis=-1)
xy_coords = (coords - optical_center) / focal_length
return homogenize(xy_coords)
@utils.name_scope
def camera_to_texture_coordinates(coords, intrinsics):
"""Convert (x,y,z) coordinates relative to camera to texture coordinates.
Args:
coords: [..., 3] coordinates
intrinsics: [..., 4] camera intrinsics. Last dimension (fx, fy, cx, cy)
Returns:
[..., 2] coordinates, transformed by dividing by Z, applying camera
intrinsics and scaling to image size.
Raises:
ValueError: if coords is the wrong shape.
"""
check_input_shape('coords', coords, -1, 3)
xy_coords = tf.math.divide_no_nan(coords[Ellipsis, :2], coords[Ellipsis, 2:])
# Scale by focal length and shift optical center.
# (These are element-wise operations on the x and y coords.)
focal_length, optical_center = tf.split(intrinsics, [2, 2], axis=-1)
xy_coords = (xy_coords * focal_length) + optical_center
return xy_coords
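# Editor's note: illustration added in this edited copy, not in the original.
def _example_texture_camera_roundtrip():
  """Sketch: texture -> camera -> texture is the identity mapping.

  Expected result: the original coordinates [[0.25, 0.75]].
  """
  intrinsics = tf.constant([0.5, 0.5, 0.5, 0.5])
  coords = tf.constant([[0.25, 0.75]])
  camera = texture_to_camera_coordinates(coords, intrinsics)  # z == 1
  return camera_to_texture_coordinates(camera, intrinsics)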
@utils.name_scope
def get_camera_relative_points(indices, point, pose):
"""Get tensor of camera-relative 3d points in a frame.
Args:
indices: [B, P] Indices into point of coordinates to retrieve.
point: [B, N, 3] A set of N (x,y,z) coordinates per batch item
pose: [B, 3, 4] Camera pose
Returns:
[B, P, 3] Point coordinates corresponding to the indices.
Specifically result[b, p, :] = point[b, indices[b, p], :].
"""
# There is no "batched gather" so we either must loop over the batch, or
# use gather_nd. Looping over the batch is simpler so we'll do that.
point_shape = point.shape.as_list()
# Batch size must be statically known
assert (point_shape is not None and len(point_shape) and
point_shape[0] is not None)
batch_size = point_shape[0]
coordinates = []
for item in range(batch_size):
coordinates.append(tf.gather(point[item], indices[item]))
extracted_points = tf.stack(coordinates)
# Convert points to be camera-relative.
return mat34_transform(pose, extracted_points)
# ========== IMAGES AND SAMPLING ==========
@utils.name_scope
def pixel_center_grid(height, width):
"""Produce a grid of (x,y) texture-coordinate pairs of pixel centers.
Args:
height: (integer) height, not a tensor
width: (integer) width, not a tensor
Returns:
A tensor of shape [height, width, 2] where each entry gives the (x,y)
texture coordinates of the corresponding pixel center. For example, for
pixel_center_grid(2, 3) the result is:
[[[1/6, 1/4], [3/6, 1/4], [5/6, 1/4]],
[[1/6, 3/4], [3/6, 3/4], [5/6, 3/4]]]
"""
height_float = tf.cast(height, dtype=tf.float32)
width_float = tf.cast(width, dtype=tf.float32)
ys = tf.linspace(0.5 / height_float, 1.0 - 0.5 / height_float, height)
xs = tf.linspace(0.5 / width_float, 1.0 - 0.5 / width_float, width)
xs, ys = tf.meshgrid(xs, ys)
grid = tf.stack([xs, ys], axis=-1)
assert grid.shape.as_list() == [height, width, 2]
return grid
@utils.name_scope
def camera_rays(intrinsics, height, width):
"""A tensor of rays from the camera to the plane at z=1, one per pixel.
Args:
intrinsics: [..., 4] camera intrinsics
height: output height in pixels
width: output width in pixels
Returns:
[..., H, W, 3] A grid of H x W rays. Each ray is a vector (x, y, 1) in
camera space. For example, for a pixel at the principal point, the
corresponding ray is (0, 0, 1).
"""
coords = pixel_center_grid(height, width)
intrinsics = intrinsics[Ellipsis, tf.newaxis, tf.newaxis, :]
rays = texture_to_camera_coordinates(coords, intrinsics)
return rays
@utils.name_scope
def clip_texture_coords_to_corner_pixels(coords, height, width):
"""Clip texture coordinates to the centers of the corner pixels."""
min_x = 0.5 / width
min_y = 0.5 / height
max_x = 1.0 - min_x
max_y = 1.0 - min_y
return tf.clip_by_value(coords, [min_x, min_y], [max_x, max_y])
@utils.name_scope
def sample_image(image, coords, clamp=True):
"""Sample points from an image, using bilinear filtering.
Args:
image: [B0, ..., Bn-1, height, width, channels] image data
coords: [B0, ..., Bn-1, ..., 2] (x,y) texture coordinates
clamp: if True, coordinates are clamped to the coordinates of the corner
pixels -- i.e. minimum value 0.5/width, 0.5/height and maximum value
1.0-0.5/width or 1.0-0.5/height. This is equivalent to extending the image
in all directions by copying its edge pixels. If False, sampling values
outside the image will return 0 values.
Returns:
[B0, ..., Bn-1, ..., channels] image data, in which each value is sampled
with bilinear interpolation from the image at position indicated by the
(x,y) texture coordinates. The image and coords parameters must have
matching batch dimensions B0, ..., Bn-1.
Raises:
ValueError: if shapes are incompatible.
"""
check_input_shape('coords', coords, -1, 2)
tfshape = tf.shape(image)[-3:-1]
height = tf.cast(tfshape[0], dtype=tf.float32)
width = tf.cast(tfshape[1], dtype=tf.float32)
if clamp:
coords = clip_texture_coords_to_corner_pixels(coords, height, width)
# Resampler expects coordinates where (0,0) is the center of the top-left
# pixel and (width-1, height-1) is the center of the bottom-right pixel.
pixel_coords = coords * [width, height] - 0.5
# tfa_image.resampler only works with exactly one batch dimension, i.e. it
# expects image to be [batch, height, width, channels] and pixel_coords to be
# [batch, ..., 2]. So we need to reshape, perform the resampling, and then
# reshape back to what we had.
batch_dims = len(image.shape.as_list()) - 3
assert (image.shape.as_list()[:batch_dims] == pixel_coords.shape.as_list()
[:batch_dims])
batched_image, _ = utils.flatten_batch(image, batch_dims)
batched_coords, unflatten_coords = utils.flatten_batch(
pixel_coords, batch_dims)
resampled = tfa_image.resampler(batched_image, batched_coords)
# Convert back to the right shape to return
resampled = unflatten_coords(resampled)
return resampled
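# Editor's note: illustration added in this edited copy, not in the original;
# it assumes tensorflow_addons' resampler behaves as described above.
def _example_sample_image():
  """Sketch: bilinear sampling at the center of a 2x2 image.

  The single sample sits exactly between the four pixels, so the expected
  value is their mean, 1.5.
  """
  image = tf.constant([[[[0.0], [1.0]],
                        [[2.0], [3.0]]]])  # Shape [1, 2, 2, 1].
  coords = tf.constant([[[0.5, 0.5]]])     # Shape [1, 1, 2], image center.
  return sample_image(image, coords)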
# ========== WARPS AND HOMOGRAPHIES ==========
@utils.name_scope
def inverse_homography(source_pose, source_intrinsics, target_pose,
target_intrinsics, plane):
"""Compute inverse homography from source to target.
This function computes a matrix H which relates the image of the plane P
in the source and target cameras by matrix multiplication as follows:
(source_u, source_v, source_w) = H (target_u, target_v, target_w)
where (u, v, w) are the homogeneous coordinates of the point in the
image-spaces of the source and target cameras.
The plane P is specified as a normal vector (plane[0:3]) in the source
camera-space plus an offset (plane[3]). A point p in source-camera-space
is in the plane when (p_x, p_y, p_z, 1) . P == 0.
Args:
source_pose: [..., 3, 4] source camera pose
source_intrinsics: [..., 4] last dimension (fx, fy, cx, cy)
target_pose: [..., 3, 4] target camera pose
target_intrinsics: [..., 4] last dimension (fx, fy, cx, cy)
plane: [..., 4] The plane P.
Returns:
[..., 3, 3] Homography matrix H.
"""
target_to_source_pose = mat34_product(source_pose,
mat34_pose_inverse(target_pose))
rotation, translation = tf.split(target_to_source_pose, [3, 1], axis=-1)
plane_normal = plane[Ellipsis, tf.newaxis, :3]
plane_offset = plane[Ellipsis, tf.newaxis, 3:]
# Everything now has 2 final dimensions for matrix operations, i.e.
# rotation [..., 3, 3] from target to source
# translation [..., 3, 1] from target to source, in source space
# plane_normal [..., 1, 3] in source space
# plane_offset [..., 1, 1] in source space
denominator = broadcasting_matmul(plane_normal, translation) + plane_offset
numerator = broadcasting_matmul(
broadcasting_matmul(-translation, plane_normal), rotation)
return broadcasting_matmul(
intrinsics_matrix(source_intrinsics),
broadcasting_matmul(rotation + tf.divide(numerator, denominator),
inverse_intrinsics_matrix(target_intrinsics)))
@utils.name_scope
def apply_homography(homography, coords):
"""Transform grid of (x,y) texture coordinates by a homography.
Args:
homography: [..., 3, 3]
coords: [..., H, W, 2] (x,y) texture coordinates
Returns:
[..., H, W, 2] transformed coordinates.
"""
height = tf.shape(coords)[-3]
coords = homogenize(utils.collapse_dim(coords, -2)) # [..., H*W, 3]
# Instead of transposing the coords, transpose the homography and
# swap the order of multiplication.
coords = broadcasting_matmul(coords, homography, transpose_b=True)
# coords is now [..., H*W, 3]
return utils.split_dim(dehomogenize(coords), -2, height)
@utils.name_scope
def homography_warp(image, homography, height=None, width=None, clamp=True):
"""Warp an image according to an inverse homography.
Args:
image: [..., H, W, C] input image
homography: [..., 3, 3] homography mapping output to input
height: desired output height (or None to use input height)
width: desired output width (or None to use input width)
clamp: whether to clamp image coordinates (see sample_image doc)
Returns:
[..., height, width, C] warped image.
"""
(image, homography) = utils.broadcast_to_match(
image, homography, ignore_axes=(3, 2))
if height is None:
height = image.shape.as_list()[-3]
if width is None:
width = image.shape.as_list()[-2]
target_coords = pixel_center_grid(height, width)
source_coords = apply_homography(homography, target_coords)
return sample_image(image, source_coords, clamp=clamp)
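# Editor's note: illustration added in this edited copy, not in the original.
def _example_homography_warp():
  """Sketch: a degenerate plane-induced warp with identical cameras.

  When source and target cameras coincide, inverse_homography returns the
  identity (up to scale) for any plane, so homography_warp should reproduce
  the input image exactly at the pixel centers.
  """
  pose = tf.constant([[1.0, 0.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0, 0.0]])
  intrinsics = tf.constant([0.5, 0.5, 0.5, 0.5])
  plane = tf.constant([0.0, 0.0, 1.0, -1.0])  # The plane z == 1.
  homography = inverse_homography(pose, intrinsics, pose, intrinsics, plane)
  image = tf.reshape(tf.range(48, dtype=tf.float32), [1, 4, 4, 3])
  return homography_warp(image, homography)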
| apache-2.0 | 6,051,238,215,533,420,000 | 33.343152 | 81 | 0.65869 | false | 3.26963 | false | false | false |
mgerstner/backintime | common/logger.py | 1 | 3763 |
# Back In Time
# Copyright (C) 2008-2017 Oprea Dan, Bart de Koning, Richard Bailey, Germar Reitze
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import syslog
import os
import sys
import atexit
import tools
import bcolors
DEBUG = False
APP_NAME = 'backintime'
def openlog():
name = os.getenv('LOGNAME', 'unknown')
syslog.openlog("%s (%s/1)" %(APP_NAME, name))
atexit.register(closelog)
def changeProfile(profile_id):
name = os.getenv('LOGNAME', 'unknown')
syslog.openlog("%s (%s/%s)" %(APP_NAME, name, profile_id))
def closelog():
syslog.closelog()
def error(msg , parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sERROR%s: %s' %(bcolors.FAIL, bcolors.ENDC, msg), file=sys.stderr)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_ERR, 'ERROR: ' + line)
def warning(msg , parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sWARNING%s: %s' %(bcolors.WARNING, bcolors.ENDC, msg), file=sys.stderr)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_WARNING, 'WARNING: ' + line)
def info(msg , parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sINFO%s: %s' %(bcolors.OKGREEN, bcolors.ENDC, msg), file=sys.stdout)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_INFO, 'INFO: ' + line)
def debug(msg, parent = None, traceDepth = 0):
if DEBUG:
msg = '%s %s' %(_debugHeader(parent, traceDepth), msg)
print('%sDEBUG%s: %s' %(bcolors.OKBLUE, bcolors.ENDC, msg), file = sys.stdout)
for line in tools.wrapLine(msg):
syslog.syslog(syslog.LOG_DEBUG, 'DEBUG: %s' %line)
def deprecated(parent = None):
frame = sys._getframe(1)
fdir, fname = os.path.split(frame.f_code.co_filename)
fmodule = os.path.basename(fdir)
line = frame.f_lineno
if parent:
fclass = '%s.' %parent.__class__.__name__
else:
fclass = ''
func = frame.f_code.co_name
frameCaller = sys._getframe(2)
fdirCaller, fnameCaller = os.path.split(frameCaller.f_code.co_filename)
fmoduleCaller = os.path.basename(fdirCaller)
lineCaller = frameCaller.f_lineno
msg = '%s/%s:%s %s%s called from ' %(fmodule, fname, line, fclass, func)
msgCaller = '%s/%s:%s' %(fmoduleCaller, fnameCaller, lineCaller)
print('%sDEPRECATED%s: %s%s%s%s' %(bcolors.WARNING, bcolors.ENDC, msg, bcolors.OKBLUE, msgCaller, bcolors.ENDC), file=sys.stderr)
syslog.syslog(syslog.LOG_WARNING, 'DEPRECATED: %s%s' %(msg, msgCaller))
def _debugHeader(parent, traceDepth):
frame = sys._getframe(2 + traceDepth)
fdir, fname = os.path.split(frame.f_code.co_filename)
fmodule = os.path.basename(fdir)
line = frame.f_lineno
if parent:
fclass = '%s.' %parent.__class__.__name__
else:
fclass = ''
func = frame.f_code.co_name
return '[%s/%s:%s %s%s]' %(fmodule, fname, line, fclass, func)
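# Editor's sketch (added in this edited copy, not part of the original module):
# the typical call sequence for this logger.
def _example_usage():
    """Illustrates the intended call order; assumes syslog is available."""
    openlog()                    # also registers closelog() via atexit
    changeProfile(1)             # tag subsequent syslog entries with profile 1
    info('snapshot started')     # stdout + syslog LOG_INFO
    warning('low disk space')    # stderr + syslog LOG_WARNING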
| gpl-2.0 | 4,494,096,716,557,691,400 | 36.257426 | 133 | 0.652405 | false | 3.162185 | false | false | false |
vincentadam87/gatsby-hackathon-seizure | code/python/seizures/examples/cross_validation_test.py | 1 | 5173 |
'''
Created on 10 August 2014
@author: vincent
'''
# Loading necessary packages
import numpy as np
import sys
from seizures.data.DataLoader_v2 import DataLoader
from seizures.evaluation.XValidation import XValidation
from seizures.evaluation.performance_measures import accuracy, auc
from seizures.features.FeatureExtractBase import FeatureExtractBase
from seizures.features.MixFeatures import MixFeatures
from seizures.features.SEFeatures import SEFeatures
from seizures.features.StatsFeatures import StatsFeatures
from seizures.features.PLVFeatures import PLVFeatures
from seizures.features.ARFeatures import ARFeatures
from seizures.features.LyapunovFeatures import LyapunovFeatures
from seizures.prediction.ForestPredictor import ForestPredictor
from seizures.prediction.SVMPredictor import SVMPredictor
from seizures.prediction.XtraTreesPredictor import XtraTreesPredictor
from seizures.Global import Global
from sklearn.cross_validation import train_test_split
def Xval_on_single_patient(predictor_cls, feature_extractor, patient_name="Dog_1",preprocess=True):
"""
Single patient cross validation
Returns 2 lists of cross validation performances
:param predictor_cls:
:param feature_extractor
:param patient_name:
:return:
"""
# predictor_cls is a handle to an instance of PredictorBase
# Instantiate the predictor
predictor = predictor_cls()
base_dir = Global.path_map('clips_folder')
base_dir = '/nfs/data3/kaggle_seizure/clips/'
loader = DataLoader(base_dir, feature_extractor)
X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(patient_name,preprocess=preprocess)
#X_train,y_seizure, y_early = loader.training_data(patient_name)
#y_train = [y_seizure,y_early]
#X_list,y_list = train_test_split(X_train,y_train)
# running cross validation
print patient_name
print "\ncross validation: seizures vs not"
result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc)
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(result_seizure), np.std(result_seizure), result_seizure)
print "\ncross validation: early_vs_not"
result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc)
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(result_early), np.std(result_early), result_early)
return result_seizure,result_early
def Xval_on_patients(predictor_cls, feature_extractor, patients_list=['Dog_1'],preprocess=True):
''' Runs cross validation for given predictor class and feature instance on the given list of patients
INPUT:
- predictor_cls: a Predictor class (implement)
- feature_extractor: an instantiation of a Features class
- patients_list: a list of subject strings e.g., ['Dog_1', 'Patient_2']
'''
assert(isinstance(feature_extractor, FeatureExtractBase))
results_seizure = []
results_early = []
for patient_name in patients_list:
result_seizure, result_early = Xval_on_single_patient(predictor_cls, feature_extractor, patient_name, preprocess=preprocess)
results_seizure.append(result_seizure)
results_early.append(result_early)
avg_results_seizure = np.mean(np.array(results_seizure),axis=0)
avg_results_early = np.mean(np.array(results_early),axis=0)
print "\ncross validation: seizures vs not (ACROSS ALL SUBJECTS)"
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(avg_results_seizure), np.std(avg_results_seizure), avg_results_seizure)
print "\ncross validation: early_vs_not (ACROSS ALL SUBJECTS)"
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(avg_results_early), np.std(avg_results_early), avg_results_early)
return avg_results_seizure, avg_results_early
# generate prediction for test data
def main():
# code run at script launch
#patient_name = sys.argv[1]
# There are Dog_[1-4] and Patient_[1-8]
patients_list = ["Dog_%d" % i for i in range(1, 5)] + ["Patient_%d" % i for i in range(1, 9)]
patients_list = ["Dog_%d" % i for i in [1]] #["Patient_%d" % i for i in range(1, 9)]#++
#feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}}])
#feature_extractor = PLVFeatures()
#feature_extractor = MixFeatures([{'name':"PLVFeatures",'args':{}},{'name':"ARFeatures",'args':{}}])
#feature_extractor = ARFeatures()
feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}},{'name':"PLVFeatures",'args':{}},{'name':'SEFeatures','args':{}}])
#feature_extractor = SEFeatures()
#feature_extractor = LyapunovFeatures()
#feature_extractor = StatsFeatures()
preprocess = True
predictor = SVMPredictor
#predictor = XtraTreesPredictor
if preprocess==True:
print 'Preprocessing ON'
else:
print 'Preprocessing OFF'
print 'predictor: ',predictor
Xval_on_patients(predictor,feature_extractor, patients_list,preprocess=preprocess)
if __name__ == '__main__':
main()
| bsd-2-clause | 6,804,000,983,729,179,000 | 38.792308 | 135 | 0.70462 | false | 3.359091 | false | false | false |
elena/django | django/conf/global_settings.py | 6 | 22349 |
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ar-dz', gettext_noop('Algerian Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('ig', gettext_noop('Igbo')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('ky', gettext_noop('Kyrgyz')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('tg', gettext_noop('Tajik')),
('th', gettext_noop('Thai')),
('tk', gettext_noop('Turkmen')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('uz', gettext_noop('Uzbek')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default primary key field type.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'DENY'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
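# A common configuration behind a TLS-terminating proxy looks like this
# (illustration added in this edited copy, not part of the original file):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')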
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the HttpOnly flag.
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', 'None', or False to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of seconds a password reset link is valid for (default: 3 days).
PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter'
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| bsd-3-clause | 7,911,465,464,581,487,000 | 33.648062 | 101 | 0.687489 | false | 3.419217 | false | false | false |
IMIO/django-fixmystreet | django_fixmystreet/fixmystreet/views/reports/subscribers.py | 1 | 2154 |
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.db import IntegrityError
from django_fixmystreet.fixmystreet.models import FMSUser
from django_fixmystreet.fixmystreet.models import Report, ReportSubscription
def create(request, report_id):
report = get_object_or_404(Report, id=report_id)
#CREATE USER CITIZEN IF NECESSARY
try:
user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
except FMSUser.DoesNotExist:
#Add information about the citizen connected if it does not exist
user = FMSUser.objects.create(username=request.REQUEST.get('citizen_email'), email=request.REQUEST.get('citizen_email'), first_name='ANONYMOUS', last_name='ANONYMOUS', agent=False, contractor=False, manager=False, leader=False)
#VERIFY THAT A SUBSCRIPTION DOES NOT ALREADY EXIST
if not ReportSubscription.objects.filter(subscriber=user, report=report).exists():
subscriber = ReportSubscription(subscriber=user, report=report)
subscriber.save()
messages.add_message(request, messages.SUCCESS, _("You have subscribed to updates successfully"))
return HttpResponseRedirect(report.get_absolute_url())
def remove(request, report_id):
report = get_object_or_404(Report, id=report_id)
try:
user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
except FMSUser.DoesNotExist:
HttpResponseRedirect(report.get_absolute_url())
#VERIFY THAT A SUBSCRIPTION EXISTS BEFORE TRYING TO REMOVE IT
try:
subscription = ReportSubscription.objects.get(subscriber=user, report=report)
subscription.delete()
messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))
except ReportSubscription.DoesNotExist:
#Do nothing. No subscription exists for this user...
messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))
return HttpResponseRedirect(report.get_absolute_url())
| agpl-3.0 | 3,971,464,901,499,527,000 | 42.959184 | 235 | 0.749304 | false | 4.048872 | false | false | false |
tharwan/CoopCPS | mpi_result_plot.py | 1 | 2363 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from utilities import *
from image_database import saveFiguesWithData
from sys import argv, exit
from numpy import load
import argparse
import seaborn as sns
from os import path as pathtools
parser = argparse.ArgumentParser(description='process data from cluster')
parser.add_argument('file',help="what file to work on",nargs='*')
parser.add_argument('-m','--metadata',action='store_true',help="print only metadata")
parser.add_argument('-s','--save',action='store_true',help="save images to dropbox")
parser.add_argument('--save_only',action='store_true',help="save images to dropbox, do not show on screen")
parser.add_argument('-p','--save_path',help="override the dafault save path")
args = parser.parse_args()
for filename in args.file:
f = load(filename)
print filename
meta = str(f['metadata'])
meta = meta.replace(';','\n')
print meta
if args.metadata:
exit()
plt.close("all")
figs = {}
fig_k = plt.figure()
plot_appliances_aggregate(f['appliances'],f['t'])
figs['appliances']=fig_k
fig_behaviour = plt.figure(figsize=(12,6))
matrix = plot_behavior(f['beh_matrix'])
figs['behavior']=fig_behaviour
agent_power = plt.figure()
plot_agent_power(f['P_all'],f['t'][1:])
figs['agent_power']=agent_power
overall_power = plt.figure()
plot_power_usage(f['P_global'],f['t'][1:])
figs['overall_power']=overall_power
plt.figure()
plot_appl_matrix(f['appliances'])
plt.figure()
matrix = f['appliances']
app = downsample(matrix)
time = downsample(f['t'])
sns.tsplot(app,time=time, err_style="unit_traces", err_palette=sns.dark_palette("crimson", len(app)), color="k");
plt.xlabel('time')
plt.ylabel('app')
plt.figure()
s = f['selfish']
plt.plot(s)
plt.ylim([0,1])
plt.xlabel('agent')
plt.ylabel('selfishness')
meta = str(f['metadata'])
meta_dict = {pair.split(':')[0]:pair.split(':')[1] for pair in meta.split(';')}
P_max = float(meta_dict['U'])**2/float(meta_dict['Ri'])/4
p_matrix = f['P_all']
sum_P = np.mean(p_matrix,axis=1)
p_equal = P_max/float(p_matrix.shape[0])
print "p_equal", p_equal, "P_max", P_max, "ptp", np.ptp(sum_P-p_equal), "gini",gini_coeff(sum_P)
if args.save or args.save_only:
path = args.save_path
saveFiguesWithData(path, figs, str(f['metadata']),prefix=pathtools.basename(filename)[:-4])
if not(args.save_only):
plt.show()
|
mit
| 3,038,978,000,681,454,000
| 26.16092
| 114
| 0.677529
| false
| 2.763743
| false
| false
| false
|
robertsj/poropy
|
pyqtgraph/examples/GraphicsScene.py
|
1
|
1462
|
# -*- coding: utf-8 -*-
## Add path to library (just for examples; you do not need this)
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
from pyqtgraph.GraphicsScene import GraphicsScene
app = QtGui.QApplication([])
win = pg.GraphicsView()
win.show()
class Obj(QtGui.QGraphicsObject):
def __init__(self):
QtGui.QGraphicsObject.__init__(self)
GraphicsScene.registerObject(self)
def paint(self, p, *args):
p.setPen(pg.mkPen(200,200,200))
p.drawRect(self.boundingRect())
def boundingRect(self):
return QtCore.QRectF(0, 0, 20, 20)
def mouseClickEvent(self, ev):
if ev.double():
print "double click"
else:
print "click"
ev.accept()
#def mouseDragEvent(self, ev):
#print "drag"
#ev.accept()
#self.setPos(self.pos() + ev.pos()-ev.lastPos())
vb = pg.ViewBox()
win.setCentralItem(vb)
obj = Obj()
vb.addItem(obj)
obj2 = Obj()
win.addItem(obj2)
def clicked():
print "button click"
btn = QtGui.QPushButton("BTN")
btn.clicked.connect(clicked)
prox = QtGui.QGraphicsProxyWidget()
prox.setWidget(btn)
prox.setPos(100,0)
vb.addItem(prox)
g = pg.GridItem()
vb.addItem(g)
## Start Qt event loop unless running in interactive mode.
if sys.flags.interactive != 1:
app.exec_()
|
mit
| 6,903,885,841,460,964,000
| 21.492308
| 71
| 0.620383
| false
| 3.315193
| false
| false
| false
|
interpss/DeepMachineLearning
|
ipss.dml/py/c_graph/single_net/predict_voltage5.py
|
1
|
3237
|
'''
Copyright (C) 2005-17 www.interpss.org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Use NN-model to predict the bus voltage for a set of scale-factors
Starting from the predict_voltage1.py case, the following changes are made
- The NN-Model Loadflow method is used
  - ieee14-2 case is used, where PV bus limits are set to a very large number
'''
from datetime import datetime
import tensorflow as tf
import sys
sys.path.insert(0, '../..')
import lib.common_func as cf
train_points = 100
#
# load the IEEE-14Bus case
#
filename = 'testdata/ieee14-2.ieee'
noBus, noBranch = cf.ipss_app.loadCase(filename, 'NNLFLoadChangeTrainCaseBuilder')
print(filename, ' loaded, no of Buses, Branches:', noBus, ', ', noBranch)
# define model size
size = noBus * 2
#print('size: ', size)
# define model variables
W1 = tf.Variable(tf.zeros([size,size]))
b1 = tf.Variable(tf.zeros([size]))
init = tf.initialize_all_variables()
# define model
def nn_model(data):
output = tf.matmul(data, W1) + b1
return output
# define loss
x = tf.placeholder(tf.float32, [None, size])
y = tf.placeholder(tf.float32)
error = tf.square(nn_model(x) - y)
loss = tf.reduce_sum(error)
# define training optimization
optimizer = tf.train.GradientDescentOptimizer(cf.learning_rate)
train = optimizer.minimize(loss)
# run the computation graph
with tf.Session() as sess :
sess.run(init)
# run the training part
# =====================
print('Begin training: ', datetime.now())
# retrieve training set
trainSet = cf.ipss_app.getTrainSet(train_points)
train_x, train_y = cf.transfer2PyArrays(trainSet)
# run the training part
for i in range(cf.train_steps):
if (i % 1000 == 0) : print('Training step: ', i)
sess.run(train, {x:train_x, y:train_y})
print('End training: ', datetime.now())
'''
print('W1: ', sess.run(W1))
print('b1: ', sess.run(b1))
'''
# run the verification part
# =========================
# retrieve a test case
for factor in [0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.55] :
#for factor in [0.45, 1.0, 1.55] :
testCase = cf.ipss_app.getTestCase(factor)
test_x, test_y = cf.transfer2PyArrays(testCase)
# compute model output (network voltage)
model_y = sess.run(nn_model(x), {x:test_x})
#printArray(model_y, 'model_y')
netVoltage = cf.transfer2JavaDblAry(model_y[0], size)
print('model out mismatch: ', cf.ipss_app.getMismatchInfo(netVoltage))
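# Illustrative sketch (added, not part of the original script): the trained model is a
# single affine map, so once W1 and b1 have been fetched as numpy arrays inside the
# session above (e.g. w = sess.run(W1); b = sess.run(b1)), it can be applied without
# TensorFlow. The helper below is only a sketch of that idea.
def apply_trained_model(w, b, inputs):
    # y = x.W + b, the same mapping that nn_model() builds in the graph
    import numpy as np
    return np.matmul(np.asarray(inputs), w) + b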
|
apache-2.0
| -7,357,071,836,146,111,000
| 27.162162
| 87
| 0.623417
| false
| 3.378914
| true
| false
| false
|
lzw120/django
|
mysite/mysite/settings.py
|
1
|
5565
|
# Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/Users/zewenli/git/django/mysite/mysite/books/mydata.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '19xu0m_c&qbwxk@hl1n0um2nvfo&=@jclatjdf!#_z)z-k5s5='
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
# a better idea would be like this:
# import os.path
# os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
    # but never mind, since this is my own small django app
'/Users/zewenli/git/django/mysite//mysite/templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
'mysite.books',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
bsd-3-clause
| -9,091,811,255,458,302,000
| 34.44586
| 140
| 0.679245
| false
| 3.634879
| false
| false
| false
|
willsirius/DualTreeRRTStartMotionPlanning
|
pythonVision2/userdefined.py
|
1
|
13777
|
import time
import openravepy
import sys
import numpy as np
from numpy import sin,cos
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# import random
import transformationFunction as tf
import kdtree
import scipy.spatial as spatial
import cgkit.all as cg
# weight of quaternion in distance
QWEIGHT = 0.1
def S2CQ(s):
# return the configuration part of a state point
# s = [x,v,Q,W]
return s[0:3]+s[6:10]
def nnNodeIndexPathSQ(s,path,startIndex):
# return the nearest node index in path of state point s, index smaller than startIndex won't count
minL = 1000
for i in range(startIndex,len(path)):
dist = distCSQ(s,path[i])
if minL > dist:
minIndex = i
minL = dist
return minIndex
def sampleFSC(s,c):
# sample force vector at state s while setting c as the goal
# error is the position error with the goal
# stdev = 1
# q = s[6:10]
# R = Q2R(q)
# zw = np.matmul(R,[0,0,1])
# zproj = zw[2]
# average = 4.5325/zproj
# # for the control part
# R = Q2R(s[6:10])
# X = Q2R(s[0:3])
# V = np.array(s[3:6])
# O = np.array(s[10:13])
# dR = Q2R(c[3:7])
# dX = np.array(c[0:3])
# dV = np.array([0,0,0])
# dO = dV
# eR = vee(0.5*(np.matmul(dR.T,R)-np.matmul(R.T,dR)))
# eO = O - np.matmul(np.matmul(R.T,dR),dO)
stdev = 0.6
R = Q2R(s[6:10])
X = s[0:3]
V = np.array(s[3:6])
O = np.array(s[10:13])
dR = Q2R(c[3:7])
dX = np.array(c[0:3])
dV = np.array([0,0,0])
dO = dV
zw = np.matmul(R,[0,0,1])
zproj = zw[2]
average = 4.5325/zproj
eX = dX - X
eXb = np.matmul(R.T,eX) #error on X in body Frame
eVb = np.matmul(R.T,dV - V)
kz = 1
kx = 0.5
ky = 0.5
kv = 0.3
eXb = eXb + eVb*kv
eXx = eXb[0]
eXy = eXb[1]
eXz = eXb[2]
average = average + eXz*kz
f1 = np.random.normal(average + 0.5*kx*eXx, stdev)
f2 = np.random.normal(average + 0.5*ky*eXy, stdev)
f3 = np.random.normal(average - 0.5*kx*eXx, stdev)
f4 = np.random.normal(average - 0.5*ky*eXy, stdev)
f = [f1,f2,f3,f4]
return f
def distCSQ(s,c,w = QWEIGHT):
# return the distance between a state point and a configuration point
# using quaternion
# s = [x,v,Q,W]
return distXQ(S2CQ(s),c,w)
def sampleFS(s):
	# sample a force vector
	# based on the current state, trying to balance the copter
# avf = 1.85*9.8/4
stdev = 0.5
q = s[6:10]
R = Q2R(q)
zw = np.matmul(R,[0,0,1])
zproj = zw[2]
average = 4.5325/(0.5*zproj+0.5)
f1 = abs(np.random.normal(average, stdev))
f2 = abs(np.random.normal(average, stdev))
f3 = abs(np.random.normal(average, stdev))
f4 = abs(np.random.normal(average, stdev))
f = [f1,f2,f3,f4]
return f
def sampleF():
	# sample a force vector
stdev = 1
average = 5;
f1 = abs(np.random.normal(average, stdev))
f2 = abs(np.random.normal(average, stdev))
f3 = abs(np.random.normal(average, stdev))
f4 = abs(np.random.normal(average, stdev))
f = [f1,f2,f3,f4]
return f
def distXQ(a,b, w = QWEIGHT):
	# return the distance between two configurations
ax = np.array(a[0:3])
aq = np.array(a[3:7])
bx = np.array(b[0:3])
bq = np.array(b[3:7])
return np.linalg.norm(ax - bx) + w* (1 - np.abs(np.dot(aq,bq)))
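# Illustrative check (added sketch, values below are arbitrary): for two
# configurations at the same position, distXQ reduces to the weighted
# quaternion term, and identical orientations give a distance of zero.
def _demo_distXQ():
	a = [1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0]	# position (1,2,3), identity quaternion
	b = [1.0, 2.0, 3.0, 0.0, 1.0, 0.0, 0.0]	# same position, orthogonal quaternion (180 deg flip)
	return distXQ(a, a), distXQ(a, b)	# expected: (0.0, QWEIGHT*1.0)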
def nnNodeCQ(tree,node):
# tree is a dictionary
# node is a list
	# Configuration space
# Using quaternion for orientation
min = 10000
for i in tree:
iToNode = distXQ(node,i)
if iToNode < min:
min = iToNode
minIndex = i
return minIndex
def xyzt(start,end,t):
	# return an interpolation from start to end at t
return list((np.array(end)-np.array(start))*t + np.array(start))
def stepXQ(start,end,n):
# return a configuration sequence in the form of [X,Q]
# n >=2
if n == 2:
return [start,end]
qs = cg.quat(start[3:7])
qe = cg.quat(end[3:7])
xyzs = start[0:3]
xyze = end[0:3]
nodes = []
for i in range(0,n):
t = float(i)/(n-1)
# print t
qt = cg.slerp(t, qs, qe, shortest=True)
nodes.append(list(xyzt(xyzs,xyze,t)+[qt.w,qt.x,qt.y,qt.z]))
return nodes
def stepNodesQ(start,end,step):
	# return a list of nodes going from start to end with a specific step
	# each node is in the form [X,Q]
	# the returned path excludes the start
l = distXQ(start,end)
if l <= step:
return [end]
else:
n = int(np.ceil(l/step)) + 1
nodes = stepXQ(start , end , n)
del nodes[0]
nodes.pop()
nodes.append(end)
return nodes
def discretePath(path, step = 0.1):
# input a path and a max step, return a discreterized path with maximum step
newPath = [path[0]]
for i in range(0,len(path)-1):
NodeS = path[i]
NodeE = path[i+1]
seg = stepNodesQ(NodeS,NodeE,step)
newPath = newPath + seg
return newPath
def step1NodeQ(start,end,step):
	# return a single node stepped from start towards end by at most the given step
l = distXQ(start,end)
if l <= step:
return end
else:
t = step/l
qs = cg.quat(start[3:7])
qe = cg.quat(end[3:7])
qt = cg.slerp(t, qs, qe, shortest=True)
return list(xyzt(start[0:3],end[0:3],t)+[qt.w,qt.x,qt.y,qt.z])
def getpath(tree,goal):
# get the path from a RRT tree
# tree is in dictionary
# path , goal is list
path = [goal]
while 1:
if tree[tuple(path[0])] == tuple(path[0]):
break
path = [list(tree[tuple(path[0])])]+path
return path
def nodesDist(x,y):
return np.linalg.norm(np.asarray(x)-np.asarray(y))
def stepNodes(start,end,step):
	# return a list of nodes going from start to end with a specific step
l = nodesDist(start,end)
if l <= step:
return [end]
else:
n = int(np.ceil(l/step))
delta = (np.asarray(end)-np.asarray(start))/l*step
nodes = []
for i in range(0,n-1):
nodes.append(list(np.asarray(start)+delta*(i+1)))
nodes.append(end)
return nodes
def step1Node(start,end,step):
	# return a node steered from start towards end
l = nodesDist(start,end)
if l <= step:
return end
else:
return list(np.asarray(start)+(np.asarray(end)-np.asarray(start))/l*step)
def plotHist(x):
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.show()
def limitTo(a,lower,upper):
if a <= lower:
return lower
if a >= upper:
return upper
return a
# sample an angle from
# [-4.5,3.5,-2.2,2.2,0.21,1.54]
def sampleCE(workspaceBound = [-4.5,4.5,-2.2,2.2,0.21,1.54]):
x = np.random.uniform(workspaceBound[0],workspaceBound[1])
y = np.random.uniform(workspaceBound[2],workspaceBound[3])
z = np.random.uniform(workspaceBound[4],workspaceBound[5])
q1 = np.random.uniform(0,2*np.pi)
q3 = np.random.uniform(0,2*np.pi)
while 1:
q2 = np.abs(np.random.normal(0,np.pi/4))
if q2 <= np.pi/2:
break
return [x,y,z,q1,q2,q3]
def sampleCQ(workspaceBound = [-4.5,4.5,-2.2,2.2,0.21,1.54]):
x = np.random.uniform(workspaceBound[0],workspaceBound[1])
y = np.random.uniform(workspaceBound[2],workspaceBound[3])
z = np.random.uniform(workspaceBound[4],workspaceBound[5])
q1 = np.random.uniform(0,2*np.pi)
q3 = np.random.uniform(0,2*np.pi)
# q3 = 0 #np.random.uniform(0,2*np.pi)
# while 1:
# q2 = np.abs(np.random.normal(0,np.pi/2))
# if q2 <= np.pi/2:
# break
q2 = np.random.uniform(0,0.5*np.pi)
return [x,y,z] + list(tf.quaternion_from_euler(q1,q2,q3,'rzxz'))
def E2Q(x):
return x[0:3] + list(tf.quaternion_from_euler(x[3],x[4],x[5],'rzxz'))
def Q2R(Q):
# convert a quaternion to a rotation matrix
# input must be a unit quaternion
qw = Q[0]
qx = Q[1]
qy = Q[2]
qz = Q[3]
R = np.array([[1 - 2*qy**2 - 2*qz**2, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
[2*qx*qy + 2*qz*qw, 1 - 2*qx**2 - 2*qz**2, 2*qy*qz - 2*qx*qw],
[2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw ,1 - 2*qx**2 - 2*qy**2]])
return R
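# Added sanity-check sketch for Q2R: the identity quaternion gives the identity
# matrix, and a 180 degree rotation about z negates the x and y axes.
def _demo_Q2R():
	R_identity = Q2R([1.0, 0.0, 0.0, 0.0])	# ~ np.eye(3)
	R_z180 = Q2R([0.0, 0.0, 0.0, 1.0])	# ~ diag(-1, -1, 1)
	return R_identity, R_z180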
def genCQ(x,y,z,q1,q2,q3):
# generate a quaternion by parameters
sq32 = sin(q3/2)
sq1 = sin(q1)
# print sq32
# print sq1
return [x,y,z,cos(q3/2),sq32*sq1*cos(q2),sq32*sq1*sin(q2),sq32*cos(q1)]
def hat(v):
# hat map of a vector
	# input a numpy array or list, output a numpy array
return np.array([[0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0]])
def vee(A):
# inverse of hat map
return np.array([A[2,1],A[0,2],A[1,0]])
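# Added sanity-check sketch: hat and vee are inverse maps, and hat(v) realises
# the cross product, i.e. hat(v).w == v x w.
def _demo_hat_vee():
	v = np.array([1.0, 2.0, 3.0])
	w = np.array([0.5, -1.0, 2.0])
	return vee(hat(v)), np.matmul(hat(v), w) - np.cross(v, w)	# ([1, 2, 3], zeros)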
def cross(a, b):
c = np.array([[a[1]*b[2] - a[2]*b[1]],
[a[2]*b[0] - a[0]*b[2]],
[a[0]*b[1] - a[1]*b[0]]])
return c
def updateState(s1,u,ts):
# update state x1 to x2 with control input u and time step ts
	# s uses a position vector and a quaternion as the representation
	# s = [x,v,Q,W]: x, v, Q, W are position, velocity, attitude quaternion and angular velocity
	# the quaternion is translated to a rotation matrix for computation
	# then the rotation matrix is converted back to a quaternion before returning
	# input and output are both lists
	# u thrust force of each motor
	# a acceleration in inertial frame
# x position in inertial frame
# v velocity in inertial frame
# Q rotation quaternion of the body in the inertial frame
# W angular velocity in the body frame
# M moment vector in the body fixed frame
# m total mass of the drone
	# Rd the derivative of the rotation matrix
# J inertia matrix
# ctf constant to convert force to torque: f*ctf = t
# MV moment vector f,mx,my,mz
J = np.array([[0.04,0,0],
[0,0.04,0],
[0,0,0.07]])
Jinv = np.array([[ 25. , 0. , 0. ],
[ 0. , 25. , 0. ],
[ 0. , 0. , 14.28571429]])
m = 1.85
d = 0.2
ctf = 0.008
g = 9.8
e3 = np.array([0,0,1])
MV = np.matmul(np.array([[1,1,1,1],[0,-d,0,d],[d,0,-d,0],[-ctf,ctf,-ctf,ctf]]),np.array([u[0],u[1],u[2],u[3]]))
f = MV[0]
M = MV[[1,2,3]]
x1 = np.array(s1[0:3])
v1 = np.array(s1[3:6])
Q1 = np.array(s1[6:10])
W1 = np.array(s1[10:13])
R1 = Q2R(Q1)
R1d = np.matmul(R1,hat(W1))
a = - g*e3+(f*np.matmul(R1,e3))/m
W1d = np.matmul( Jinv, M - np.cross(W1,np.matmul(J,W1)))
x2 = x1 + ts*v1
v2 = v1 + ts*a
R2 = R1 + ts*R1d
W2 = W1 + ts*W1d
R2t = np.identity(4)
R2t[0:3,0:3] = R2
Q2 = tf.quaternion_from_matrix(R2t)
s2 = list(x2)+list(v2)+list(Q2)+list(W2)
return s2
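# Added sketch: a quick hover check for updateState(). With the body level
# (identity quaternion), at rest, and each motor producing m*g/4 = 4.5325 N of
# thrust, one integration step should leave position and velocity essentially
# unchanged.
def _demo_updateState_hover():
	hover_state = [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]	# [x, v, Q, W]
	hover_thrust = [4.5325, 4.5325, 4.5325, 4.5325]
	return updateState(hover_state, hover_thrust, 0.02)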
# start = [1,2,3,1,0,0,0]
# end = [3,2,5,0,1,0,0]
# # stepNodesQ
# for i in stepNodesQ(start,end,0.1):
# print i#,distXQ(i,start),distXQ(i,end)
# a = np.array([1,2,3])
# print np.dot(a,a)
# print "test update state"
# s2 = [0,0,0,0,0,0,1,0,0,0,0,0,0]
# # s1 = [1,1,1,1,0,0,0,0.2,0.2,0.2,0.1,0.1,-0.1]
# u = [0,0,0,0]
# ts = 0.02
# t = range(0,100)
# for tt in t:
# s2 = updateState(s2,u,ts)
# x1 = np.array(s2[0:3])
# v1 = np.array(s2[3:6])
# Q1 = np.array(s2[6:10])
# W1 = np.array(s2[10:13])
# E1 = tf.euler_from_quaternion(Q1)
# print x1
# print v1
# print Q1
# print W1
# axarr[0, 0].plot(x, y)
# axarr[0, 0].set_title('Axis [0,0]')
# axarr[0, 1].scatter(x, y)
# axarr[0, 1].set_title('Axis [0,1]')
# axarr[1, 0].plot(x, y ** 2)
# axarr[1, 0].set_title('Axis [1,0]')
# axarr[1, 1].scatter(x, y ** 2)
# axarr[1, 1].set_title('Axis [1,1]')
# # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
# plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
# plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
# q = [1,0,0,0]
# q0 = tf.random_quaternion()
# r0 = Q2R(q0)
# print hat([1,2,3])
# print tf.euler_from_matrix(r0)
# print tf.euler_from_quaternion(q0)
# print hat([1,2,3])
# print [1,2,3,4][3]
# v = [1,2,3]
# np.array([0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0])
# print sampleRotation()
# # print np.random.normal(0, 3.14, 1)
# eM = tf.euler_matrix(0,0,1.57)
# print eM
# print np.random.uniform(0,3)
# # print 1
# print tf.random_rotation_matrix()
# print np.dot(tf.random_quaternion(),tf.random_quaternion())
# print np.matmul(tf.random_rotation_matrix(),tf.random_rotation_matrix())
# start = tf.random_quaternion();
# print start
# print tuple(start)
# a = {tuple(start):tuple(start)}
# print a
# print a[tuple(start)]
# x = [sampleC()];
# KDtree = kdtree.create(x)
# print x
# for i in range(0,200):
# # x.append(sampleC()[5])
# newnode =sampleC()
# x.append(newnode)
# KDtree.add(newnode)
# # print x
# kdtree.visualize(KDtree)
# node = sampleC()
# print node
# a = KDtree.search_nn(node)[0].data
# print a
# aa = 1000
# for i in x:
# # print "this is i"
# # print np.asarray(i)
# # print type(np.asarray(i))
# # print np.linalg.norm(np.asarray(i),np.asarray(i))
# aa = min(aa,np.linalg.norm(np.asarray(i)-np.asarray(node)))
# print aa
# print np.linalg.norm(np.asarray(a)-np.asarray(node))
# print nodesDist(1,3)
# print nodesDist([1,2,3],[4,5,6])
# print np.power(nodesDist([[2,3,4],[2,3,4]],[[1,2,3],[1,2,3]]),2)
# print np.asarray([[2,3,4],[2,3,4]])
# print np.floor(3.4)
# yy = [];
# yy.append([1,2,3])
# yy.append([1,2,5])
# print yy
# print ""
# print step1Node([30,40],[0,0.1],5)
# a = {(2,3):(1,2),(1,2):(1,2),(3,4):(1,2),(5,6):(3,4),(9,8):(3,4)};
# print a
# print getpath(a,[5,6])
# print ""
# points = np.array([ (3, 4), (1, 2),(4, 5),(6,7),(2,5),(2,4)])
# points = [[1,2],[4,5],[5,2]]
# point_tree = spatial.KDTree(points)
# This finds the index of all points within distance 1 of [1.5,2.5].
# print(point_tree.query_ball_point([1.5, 2.5], 2))
# print point_tree.query([1.5, 2.5])
# print point_tree.data[point_tree.query([1.5, 2.5])[1]]
# [0]
# # This gives the point in the KDTree which is within 1 unit of [1.5, 2.5]
# print(point_tree.data[point_tree.query_ball_point([1.5, 2.5], 1)])
# # [[1 2]]
# # More than one point is within 3 units of [1.5, 1.6].
# print(point_tree.data[point_tree.query_ball_point([1.5, 1.6], 3)])
# # [[1 2]
# # [3 4]]
# x = []
# for i in range(0,1000):
# while 1:
# q1 = np.random.normal(np.pi/4,np.pi/8)
# if np.abs(q1-np.pi/4) <= np.pi/4:
# break
# x.append(q1)
# plotHist(x)
# startconfig = [ 4.0,-1.5 ,0.2 ,1 ,0.0, 0.0, 0.0 ]
# print E2Q(startconfig)
|
mit
| -7,179,047,926,765,260,000
| 20.93949
| 112
| 0.605865
| false
| 2.182322
| false
| false
| false
|
Sendinel/Sendinel
|
sendinel/notifications/forms.py
|
1
|
1461
|
from django.forms import CharField, ModelChoiceField, DateTimeField, Form
from django.utils.translation import ugettext as _
from sendinel.backend.authhelper import format_and_validate_phonenumber
from sendinel.backend.models import Sendable, \
WayOfCommunication, \
get_enabled_wocs
class NotificationValidationForm(Form):
phone_number = CharField(validators = [format_and_validate_phonenumber],
error_messages={'required':_('Please enter a phone number')})
way_of_communication = ModelChoiceField(
queryset = get_enabled_wocs(),
error_messages={'required': \
_('Please choose a way of communication')})
date = DateTimeField(error_messages={ \
'required': _('Please choose a date'), \
'invalid': _('Please choose a date')})
class NotificationValidationFormBluetooth(Form):
way_of_communication = ModelChoiceField(
queryset = get_enabled_wocs(),
error_messages={'required': \
_('Please choose a way of communication')})
date = DateTimeField(error_messages={ \
'required': _('Please choose a date'), \
'invalid': _('Please choose a date')})
|
mit
| 6,670,814,485,201,891,000
| 46.129032
| 80
| 0.543463
| false
| 5.492481
| false
| false
| false
|
Septima/qgis-qlrbrowser
|
src/QlrBrowser/mysettings/qgissettingmanager/types/integer.py
|
1
|
3535
|
#-----------------------------------------------------------
#
# QGIS setting manager is a python module to easily manage read/write
# settings and set/get corresponding widgets.
#
# Copyright : (C) 2013 Denis Rouzaud
# Email : denis.rouzaud@gmail.com
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
# for combobox, the value corresponds to the index of the combobox
from PyQt5.QtWidgets import QLineEdit, QSpinBox, QSlider, QComboBox
from qgis.core import QgsProject
from ..setting import Setting
from ..setting_widget import SettingWidget
class Integer(Setting):
def __init__(self, name, scope, default_value, options={}):
Setting.__init__(self, name, scope, default_value, int, QgsProject.instance().readNumEntry, QgsProject.instance().writeEntry, options)
def check(self, value):
if type(value) != int and type(value) != float:
raise NameError("Setting %s must be an integer." % self.name)
def config_widget(self, widget):
if type(widget) == QLineEdit:
return LineEditIntegerWidget(self, widget, self.options)
elif type(widget) in (QSpinBox, QSlider):
return SpinBoxIntegerWidget(self, widget, self.options)
elif type(widget) == QComboBox:
return ComboBoxIntegerWidget(self, widget, self.options)
else:
print(type(widget))
raise NameError("SettingManager does not handle %s widgets for integers for the moment (setting: %s)" %
(type(widget), self.name))
class LineEditIntegerWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.textChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setText('{}'.format(value))
def widget_value(self):
try:
value = int(self.widget.text())
except ValueError:
value = None
return value
class SpinBoxIntegerWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.valueChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setValue(value)
def widget_value(self):
return self.widget.value()
class ComboBoxIntegerWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.currentIndexChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setCurrentIndex(value)
def widget_value(self):
return self.widget.currentIndex()
|
gpl-2.0
| -6,054,332,582,927,886,000
| 34
| 142
| 0.645545
| false
| 4.316239
| false
| false
| false
|
DINA-Web/datasets
|
collections-data/transformations/prepare_geography.py
|
1
|
4838
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy
import pandas
"""
Script for preparing geography data. Writes output to geography.csv.
Run from commandline, like this:
prepare_geography.py treedef_filename basetree_filename [--sweden filename]
"""
def append_country_geography(frame, country_frame, country):
country_id = frame.ID[frame.Name==country].values[0]
country_frame.ID = (
country_frame.ID.astype(int) + max(frame.ID.astype('int')))
country_frame.ParentID = (
country_frame.ID.astype(int).fillna(0) +
max(frame.ID.astype(int)))
country_frame.loc[country_frame.Name==country, 'ID'] = country_id
frame = pandas.concat([frame, country_frame])
frame.drop_duplicates(subset='ID', inplace=True)
return frame
if __name__ == '__main__':
help_text = 'Transform geography data import to Specify'
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument(
dest='treedefitems',
type=argparse.FileType('r'),
help='path to file with tree definition items')
parser.add_argument(
dest='basetree',
type=argparse.FileType('r'),
help='path to file with the base tree')
parser.add_argument(
'--denmark',
dest='denmark',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Denmark')
parser.add_argument(
'--finland',
dest='finland',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Finland')
parser.add_argument(
'--norway',
dest='norway',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Norway')
parser.add_argument(
'--sweden',
dest='sweden',
type=argparse.FileType('r'),
metavar='filename',
help='path to file with geography for Sweden')
arguments = parser.parse_args()
rank_names = {
'L0 earth' : 'Earth',
'L1 continent': 'Continent',
'L2 region': 'Region',
'L3 area': 'Area',
'L4 country': 'Country',
'L5 province': 'State',
'L6 district': 'County'}
output_columns = [
'geography_sourceid',
'parent_sourceid',
'name',
'geographytreedefitem_sourceid']
treedefitems = pandas.read_csv(arguments.treedefitems, dtype='unicode')
basetree = pandas.read_csv(arguments.basetree, dtype='unicode')
# Add root node
root_id = min(basetree.ID.astype(int) - 1)
basetree.loc[basetree.ParentID.isnull(), 'ParentID'] = root_id
number_to_add = 1 - root_id
basetree.ID = basetree.ID.astype(int) + number_to_add
basetree.ParentID = basetree.ParentID.astype(int) + number_to_add
basetree = basetree.append({
'ID': root_id + number_to_add,
'ParentID': numpy.nan,
'Name': 'Earth',
'Category': 'L0 earth'}, ignore_index=True)
basetree = basetree[['ID', 'ParentID', 'Name', 'Category']]
if arguments.denmark:
geo_den = pandas.read_csv(arguments.denmark, dtype='unicode')
geo_den = geo_den[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_den, 'Denmark')
if arguments.finland:
geo_fin = pandas.read_csv(arguments.finland, dtype='unicode')
geo_fin = geo_fin[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_fin, 'Finland')
if arguments.norway:
geo_nor = pandas.read_csv(arguments.norway, dtype='unicode')
geo_nor = geo_nor[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_nor, 'Norway')
if arguments.sweden:
geo_swe = pandas.read_csv(arguments.sweden, dtype='unicode')
geo_swe = geo_swe[['ID', 'ParentID', 'Name', 'Category']]
basetree = append_country_geography(basetree, geo_swe, 'Sweden')
basetree['Category'] = basetree['Category'].replace(rank_names)
treedefitems_merge = treedefitems[[
'geographytreedefitem_sourceid',
'name']].rename(columns={'name': 'Category'})
geography = basetree.merge(
treedefitems_merge, how='inner', on='Category')
geography.rename(columns={
'Name': 'name',
'ID': 'geography_sourceid',
'ParentID': 'parent_sourceid'}, inplace=True)
geography.geography_sourceid = geography.geography_sourceid.astype(int)
geography.sort_values(by='geography_sourceid', inplace=True)
geography.parent_sourceid = (
geography.parent_sourceid.dropna().astype(int).astype(str))
    geography[output_columns].to_csv('geography.csv', index=False, float_format='%g')
|
cc0-1.0
| -5,945,689,383,502,212,000
| 31.039735
| 79
| 0.622778
| false
| 3.450785
| false
| false
| false
|
jeKnowledge/horarios-inforestudante
|
TimetableMaker.py
|
1
|
8893
|
from itertools import combinations
# Receives a dictionary of classes with the structure:
# { CLASS_ID:{
# T:{
# CLASS_T1,
# CLASS_T...
# },
# TP:{
# CLASS_TP...
# },
# ...
# }, ...
# }
# Returns an array of all possible combinations of class sections (turmas)
# Elements in the dictionary are referred to by tuples: (aula, tipo, turma)
# IGNORES OVERLAPS (a small usage sketch follows the function below)
def possibleCombinations(dictionary):
	# Valid combinations of sections (i.e. every class type present)
	# For each class, section combinations need to be built
combTurmasValidas = []
aulas = [] # List de todas as aulas por numero
	#Build the combinations within each class
for aula in dictionary:
turmas = [] # List de de todas as turmas nesta aula (disciplina), como tuple
tipos = [] # Tipos de aula (T/TP/PL)
for tipo in dictionary[aula]:
tipos.append(tipo)
for turma in dictionary[aula][tipo]:
turmas.append((aula, tipo, turma))
combTurmas = combinations(turmas, len(tipos)) # Todas as combinacoes possiveis, incluindo (TP,TP,TP,TP)
for comb in combTurmas:
tiposNaComb = [] # Quais os tipos de aula nesta combinacao; deverao ser todos
for turma in comb:
tipo = turma[1] # Cada turma é representada por uma tuple (aula, tipo, turma); turma[1] devolve tipo
if tipo not in tiposNaComb:
tiposNaComb.append(tipo)
			#If the combination does not include every class type it is not valid
if set(tiposNaComb) != set(tipos):
continue
combTurmasValidas.append(comb)
aulas.append(aula)
	# Build the combinations of classes, taking the "legal" section combinations into account
	# By the same process as for each class:
	# Build every possible combination and drop those that do not include all classes
combAulas = combinations(combTurmasValidas, len(aulas))
combAulasValidas = [] # Todas as combinacoes de turmas
for comb in combAulas:
aulasInComb = [] # List de aulas incluidas nesta combinacao; deverao ser todas
for turmaComb in comb: # Combinacao de turmas para uma aula; tira-se o id da aula pelo primeiro elemento
if turmaComb[0][0] not in aulasInComb:
aulasInComb.append(turmaComb[0][0]) # comb[0] == (aula, tipo, turma); tuple[0] == aula
		# If this combination of sections does not include all the classes, it is not valid
if set(aulasInComb) != set(aulas):
continue
		# Check whether this combination already exists in a different order
existe = False
for combValida in combAulasValidas:
if set(combValida) == set(comb):
existe = True
break
if existe:
continue
combAulasValidas.append(comb)
return combAulasValidas
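# Added usage sketch (illustrative only; the class ids and section names below
# are made up). possibleCombinations() only inspects the dictionary keys, so
# the leaf values can be anything here.
def _demo_possibleCombinations():
	timetable = {
		101: {'T': {'T1': None}, 'TP': {'TP1': None, 'TP2': None}},
		102: {'T': {'T1': None}},
	}
	# Expected: two combinations, each pairing 102's T1 with 101's T1 plus one of its TPs.
	return possibleCombinations(timetable)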
# Receives input:
# Dictionary:
# { CLASS_ID:{
# T:{
# [T1_obj, T1_obj, T1_obj,...],
# CLASS_T...
# },
# TP:{
# CLASS_TP...
# },
# ...
# }, ...
# Valid combinations of classes:
# ( ( ((aula1, tipo1, turma1), (aula1, tipo2, turma1)), ((aula2, tipo1, turma1), (aula2, tipo2, turma1)) ), ... )
# Checks whether lessons overlap and, if they do, removes those combinations
# Returns the list of combinations without overlaps
def removeOverlaps(dictionary, validCombinations):
noOverlaps = [] # Resultado a devolver
for comb in validCombinations:
turmas = [] # turmas com "coordenadas", sob a forma (horaInicio, horaFim, (aula, tipo, turma))
		# Build time tuples and put them in the array
for aulaComb in comb:
for turma in aulaComb:
aulas = dictionary[turma[0]][turma[1]][turma[2]] # Tirar objetos Aula do dicionario (multiplos!)
for aula in aulas:
					# Build a tuple with start/end times, day and section (class/type/section)
ref = (aula.horaInicio, aula.horaFim, aula.dia, (turma[0], turma[1], turma[2]))
turmas.append(ref)
		# Build the pairs
todosPares = combinations(turmas, 2)
pares = []
		# Drop pairs that belong to the same section
for par in todosPares:
			# Check that the two sections are different
turmaA = par[0][3]
turmaB = par[1][3]
if turmaA[0] != turmaB[0] or turmaA[1] != turmaB[1] or turmaA[2] != turmaB[2]:
pares.append(par)
		# Check each pair for overlap
combSemSobreposicoes = True
for par in pares:
a = par[0]
b = par[1]
			# Different days?
if a[2] != b[2]:
continue
cedo = min(a[0], b[0])
tarde = max(a[1], b[1])
delta = tarde - cedo
			# The lessons overlap
if a[1]-a[0]+b[1]-b[0] > delta:
combSemSobreposicoes = False
break
if combSemSobreposicoes:
noOverlaps.append(comb)
return noOverlaps
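# Added sketch: removeOverlaps() expects the dictionary leaves to be lists of
# lesson objects exposing horaInicio, horaFim and dia; a namedtuple stands in
# for the real Aula objects here, purely for illustration.
from collections import namedtuple
_FakeAula = namedtuple('FakeAula', ['horaInicio', 'horaFim', 'dia'])
def _demo_removeOverlaps():
	timetable = {
		101: {'T': {'T1': [_FakeAula(9.0, 10.0, 0)]}},
		102: {'T': {'T1': [_FakeAula(9.5, 10.5, 0)]}},	# clashes with 101 T1 on Monday
	}
	comb = (((101, 'T', 'T1'),), ((102, 'T', 'T1'),))
	# The only combination clashes on Monday morning, so an empty list comes back.
	return removeOverlaps(timetable, [comb])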
from openpyxl import Workbook
from openpyxl.styles import Color, PatternFill, Style, Fill, Font
from random import randint
# Receives input:
# Dictionary:
# { CLASS_ID:{
# T:{
# [T1_obj, T1_obj, T1_obj,...],
# CLASS_T...
# },
# TP:{
# CLASS_TP...
# },
# ...
# }, ...
# Combinations of classes:
# ( ( ((aula1, tipo1, turma1), (aula1, tipo2, turma1)), ((aula2, tipo1, turma1), (aula2, tipo2, turma1)) ), ... )
# Saves an xlsx file (output.xlsx)
# Returns the openpyxl workbook
def outputExcel(dictionary, combinations):
if len(combinations) == 0:
print("No combinations!")
return
wb = Workbook()
wb.remove_sheet(wb.active) # Apagar folha default
combinationNumber = 0
for comb in combinations:
ws = wb.create_sheet(str(combinationNumber)) # Criar uma nova folha com um id para referencia
		# Day labels
ws['B1'] = "Segunda"
ws['C1'] = "Terça"
ws['D1'] = "Quarta"
ws['E1'] = "Quinta"
ws['F1'] = "Sexta"
ws['G1'] = "Sabado"
ws['H1'] = "Domingo"
		# Hour labels (every 30 minutes, from 8h to 22h)
i = 2
for n in range(80,220,5):
ws['A'+str(i)] = str(int(n/10)) + "h" + str(int(((n/10)%1)*60)) + "m"
i += 1
		# Draw the lessons
for disciplina in comb:
for coord in disciplina:
aulaObjList = dictionary[coord[0]][coord[1]][coord[2]]
for aulaObj in aulaObjList:
					# Drop half an hour at the end so the merged ranges do not overlap
cellRange = diaParaLetra(aulaObj.dia) + horaParaNumero(aulaObj.horaInicio) + ":"\
+ diaParaLetra(aulaObj.dia) + horaParaNumero(aulaObj.horaFim - 0.5)
ws.merge_cells(cellRange)
# Add label
ws[diaParaLetra(aulaObj.dia) + horaParaNumero(aulaObj.horaInicio)] = aulaObj.aulaNome +\
"," + aulaObj.turma
combinationNumber += 1 # Para referencia
wb.save('output.xlsx')
return wb
# ______ Helper functions for output: _________
def diaParaLetra(dia):
if dia == 0:
return "B"
if dia == 1:
return "C"
if dia == 2:
return "D"
if dia == 3:
return "E"
if dia == 4:
return "F"
if dia == 5:
return "G"
if dia == 6:
return "H"
def horaParaNumero(hora):
delta = hora - 8
return str(int(delta/0.5) + 2)
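# Added sketch: the two helpers above map a lesson slot to a spreadsheet cell.
# A lesson on Tuesday (dia 1) starting at 9h30 lands in cell C5.
def _demo_cell_for_slot():
	return diaParaLetra(1) + horaParaNumero(9.5)	# 'C5'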
# _____________________________________________
# XLSXtoHTMLdemo
# Program to convert the data from an XLSX file to HTML.
# Uses the openpyxl library.
# Author: Vasudev Ram - http://www.dancingbison.com
# Altered by Miguel Murça for the purposes of this program
import openpyxl
from openpyxl import load_workbook
def convertExcelToWeb(workbook):
worksheets = workbook._sheets
for worksheet in worksheets:
html_data = """
<html>
<head>
<title>
Horario
</title>
<head>
<body>
<table>
"""
ws_range = worksheet.iter_rows('A1:I30')
for row in ws_range:
html_data += "<tr>"
for cell in row:
if cell.value is None:
html_data += "<td>" + ' ' + "<td>"
else:
html_data += "<td>" + str(cell.value) + "<td>"
html_data += "<tr>"
html_data += "</table>\n</body>\n</html>"
with open(worksheet.title + ".html", "w") as html_fil:
html_fil.write(html_data)
# EOF
|
mit
| -8,867,499,705,870,523,000
| 29.337884
| 117
| 0.540279
| false
| 3.071182
| false
| false
| false
|
pinballwizard/phone
|
sms/migrations/false/0005_subscriber.py
|
1
|
1070
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-24 04:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sms', '0004_smssended_delivered'),
]
operations = [
migrations.CreateModel(
name='Subscriber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile', models.CharField(max_length=11, unique=True, verbose_name='Номер телефона')),
('account', models.CharField(max_length=12, verbose_name='Лицевой счет')),
('blocked', models.BooleanField(verbose_name='Заблокирован')),
('ban_date', models.DateField(verbose_name='Дата блокировки')),
],
options={
'verbose_name': 'Абонент',
'verbose_name_plural': 'Абоненты',
},
),
]
|
lgpl-3.0
| -2,315,646,766,958,779,400
| 33.655172
| 114
| 0.566169
| false
| 3.513986
| false
| false
| false
|
googlecodelabs/nest-tensorflow
|
codelab/classify.py
|
1
|
3339
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import os.path
import numpy as np
import tensorflow as tf
from node_lookup import NodeLookup
from errors import error_result
snapshot_file = 'tmp/tmp.jpg'
model_dir = 'tmp/imagenet'
num_top_predictions = 5
def classify_remote_image(image_url):
# Attempt to Download
try:
image = download_image(image_url)
except IOError:
return error_result("Camera's Snapshot URL could not be downloaded")
# Attempt to Classify
try:
results = run_inference_on_image(image)
except:
return error_result("Could not classify the image")
return {
"image_url": image_url,
"results": results
}
def create_graph():
with tf.gfile.FastGFile(os.path.join(
model_dir, 'classify_image_graph_def.pb'
), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
      A dict mapping human-readable label strings to prediction scores.
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-num_top_predictions:][::-1]
results = {}
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
results[human_string] = float(score)
return results
def download_image(url):
# Downloads the image from the specified URL to the filesystem
response = urllib2.urlopen(url)
body = response.read()
if body == '':
raise IOError('The Snapshot URL did not contain any HTTP body when fetched')
with open(snapshot_file, 'w') as f:
f.write(body)
return snapshot_file
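# Added usage sketch (illustrative only). The URL below is a placeholder;
# classify_remote_image() accepts any publicly reachable JPEG, e.g. a Nest
# camera snapshot URL, and returns either the classification results or an
# error dict.
if __name__ == '__main__':
    print(classify_remote_image('http://example.com/snapshot.jpg'))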
|
apache-2.0
| 8,367,322,700,183,847,000
| 30.5
| 84
| 0.649596
| false
| 3.91442
| false
| false
| false
|
mikejthomas/biote100_pset2
|
mode.py
|
1
|
2238
|
#Python Problem 3
#mode.py
#Introduction to Bioinformatics Assignment 2
#Purpose:Calculating the mode of a dataset
#Your Name: Michael Thomas
#Date: 10/12/15
#will contain all the data of a list including some unwanted carriage return symbols
tempList = []
#will contain the data as floats after it is stripped and the header is removed
myData = []
#will store the header
header = ""
#this will store the value of the mode
mode = 0
#TASK1
#We have the file data.txt read all the values of this file in the list tempList
#open data.txt and save to a tempList
text_file = open('data.txt' , 'r')
tempList = text_file.readlines()
text_file.close()
print tempList
#TASK2
#We don't want the header to be in the list Pop it and save it into the header variable
#pop out header (position 0 in list tempList) and save to header.
header = tempList.pop(0).strip()
print header
#TASK3
#for every member of tempList, clean it up from carriage return, convert it into a float and add it to the list myData
#Using list comprehension, we delete all '\r\n' from each line in tempList
myData = [line.rstrip('\r\n') for line in tempList]
#Similar to the list comprehension above, we convert all items to floats
myData = [float(i) for i in myData]
print myData
#print type(myData[1])
#TASK4
#Sort the list myData
myData.sort()
print myData
#TASK5
#using the list of floats myData Find the MODE of the data
#The mode of a dataset is the number that occurs most frequently.
#i.e. in the list [2,2,3,3,3,4] the mode is 3
#create dictionary myDic
myDic = {}
#using exceptions, we can increment the key if a repeat
#value is found, except if the value is unique
#this will create a counter that will contain
#len(myData) keys and their corresponding values
#that each key repeats in myData
for i in myData:
try:
myDic[i] += 1
except:
myDic[i] = 1
print myDic
#calculate the maximum values in the dictionary
#this will represent the value of the mode
maxval = max(myDic.values())
#for loop to print the key for the
#corresponding value of maxval which will
#be the mode for the dataset
for key, value in myDic.items():
if value == maxval:
mode = key
#print results
print "\n"
print "The mode of the:", header, " dataset is: ", mode
print "\n"
|
mit
| -4,476,699,867,430,466,000
| 28.447368
| 118
| 0.743074
| false
| 3.238784
| false
| false
| false
|
ddragon15/Overlooked-OvercookedFangame-WIP-
|
items.py
|
1
|
4159
|
import pygame
from pygame.locals import *
import math
import random
import magic
class All():
DebugBool = False
DebugV = [0,0]
isHold = False
isOccupied = False
processable = True
def Draw(self):
playerrot = pygame.transform.rotate(self.image ,self.rot)
playerpos1 = (self.pos[0]-32, self.pos[1]-32)
magic.mapScreen.blit(playerrot, playerpos1)
# pygame.draw.rect(magic.mapScreen, (50,50,131), pygame.Rect((x,y),(64,64)))
def checkCollision(self, pos):
boxrect = pygame.Rect((pos[0],pos[1]),(20,20))
myRect = pygame.Rect((self.pos[0]-16,self.pos[1]-16),(34,34))
# self.DebugV = myRect
# self.DebugBool = True
boxrect.topleft = [pos[0],pos[1]]
if myRect.colliderect(boxrect):
return True
else:
return False
def Debug(self):
if self.DebugBool:
pygame.draw.rect(magic.mapScreen, (50,250,131), self.DebugV)
# self.DebugV[0] = self.pos[0]-8
# self.DebugV[1] = self.y-8
# self.DebugBool = True
def setPos(self, pos):
self.pos = [pos[0]+8,pos[1]+8]
class Onion(All):
tag = "onion"
def __init__(self, x, y):
self.skin = "resources/images/onion.png"
self.image = pygame.image.load(self.skin)
# w,h = self.image.get_size()
# self.image = pygame.transform.scale(self.image, (int(w),int(h)))
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0 #random.randint(0, 360)*1
def Update(self):
All.Draw(self)
All.Debug(self)
def changeSkin(self):
if self.skin is not "resource/image/onionS.png":
self.skin = "resources/images/onionS.png"
self.image = pygame.image.load(self.skin)
class Tomato(All):
tag = "tomato"
def __init__(self, x, y):
self.skin = "resources/images/tomato.png"
self.image = pygame.image.load(self.skin)
# w,h = self.image.get_size()
# self.image = pygame.transform.scale(self.image, (int(w),int(h)))
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0 #random.randint(0, 360)*1
def Update(self):
All.Draw(self)
All.Debug(self)
def changeSkin(self):
if self.skin is not "resource/image/tomatoS.png":
self.skin = "resources/images/tomatoS.png"
self.image = pygame.image.load(self.skin)
class Lettuce(All):
tag = "lettuce"
def __init__(self, x, y):
self.skin = "resources/images/lettuce.png"
self.image = pygame.image.load(self.skin)
# w,h = self.image.get_size()
# self.image = pygame.transform.scale(self.image, (int(w),int(h)))
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0 #random.randint(0, 360)*1
def Update(self):
All.Draw(self)
All.Debug(self)
def changeSkin(self):
if self.skin is not "resource/image/lettuceS.png":
self.skin = "resources/images/lettuceS.png"
self.image = pygame.image.load(self.skin)
class Plate(All):
processable = False
# TODO make states for different Foods
def __init__(self, x, y):
self.skin = "resources/images/plate.png"
self.image = pygame.image.load(self.skin)
self.rect = self.image.get_rect()
self.rect.topleft = [x,y]
self.pos = [x+32,y+32]
self.rot = 0
def Update(self):
All.Draw(self)
All.Debug(self)
# TODO Plate states
    # If an item sits on top of the Plate
    # Loop through combinations of all ingredients on the plate plus the new one
    # Take the first one that all ingredients work for
# Consume the item (delete it)
# Change Skin
    # TODO Make a map out of all recipes (maybe in another File) - a sketch of such a map is appended below
    # Which items are needed?
# Can it be processed by something?
# Which state is the plate in? Choose Skin for swap and return it
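# Added sketch of the recipe map described in the TODO above. The recipe names,
# ingredient lists and skin paths are placeholders, not final game data.
RECIPES = {
    "salad": {
        "ingredients": ["lettuce", "tomato"],       # item tags required on the plate
        "needs_processing": True,                   # ingredients must be chopped first
        "plate_skin": "resources/images/plateSalad.png",
    },
    "tomato_soup": {
        "ingredients": ["tomato", "tomato", "tomato"],
        "needs_processing": True,
        "plate_skin": "resources/images/plateTomatoSoup.png",
    },
}
def find_recipe(ingredient_tags):
    # Return the first recipe whose ingredients match the given tags, mirroring
    # the "take the first one that all ingredients work for" idea above.
    for name, recipe in RECIPES.items():
        if sorted(recipe["ingredients"]) == sorted(ingredient_tags):
            return name
    return None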
|
gpl-3.0
| 3,271,743,681,788,671,000
| 29.580882
| 89
| 0.582592
| false
| 3.182096
| false
| false
| false
|
caneruguz/osf.io
|
api/base/settings/defaults.py
|
1
|
8833
|
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'osf.db.backends.postgresql', # django.db.backends.postgresql
'NAME': os.environ.get('OSF_DB_NAME', 'osf'),
'USER': os.environ.get('OSF_DB_USER', 'postgres'),
'PASSWORD': os.environ.get('OSF_DB_PASSWORD', ''),
'HOST': os.environ.get('OSF_DB_HOST', '127.0.0.1'),
'PORT': os.environ.get('OSF_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
}
}
DATABASE_ROUTERS = ['osf.db.router.PostgreSQLFailoverRouter', ]
CELERY_IMPORTS = [
'osf.management.commands.migratedata',
'osf.management.commands.migraterelations',
'osf.management.commands.verify',
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_USER_MODEL = 'osf.OSFUser'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
AUTHENTICATION_BACKENDS = (
'api.base.authentication.backends.ODMBackend',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEV_MODE = osf_settings.DEV_MODE
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'api'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'api-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
CSRF_COOKIE_HTTPONLY = osf_settings.SECURE_MODE
ALLOWED_HOSTS = [
'.osf.io'
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.admin',
# 3rd party
'rest_framework',
'rest_framework_swagger',
'corsheaders',
'raven.contrib.django.raven_compat',
'django_extensions',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.bitbucket',
'addons.box',
'addons.dataverse',
'addons.dropbox',
'addons.figshare',
'addons.forward',
'addons.github',
'addons.googledrive',
'addons.mendeley',
'addons.owncloud',
'addons.s3',
'addons.twofactor',
'addons.wiki',
'addons.zotero',
)
# local development using https
if osf_settings.SECURE_MODE and DEBUG:
INSTALLED_APPS += ('sslserver',)
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'api'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
BULK_SETTINGS = {
'DEFAULT_BULK_LIMIT': 100
}
MAX_PAGE_SIZE = 100
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
# Order is important here because of a bug in rest_framework_swagger. For now,
# rest_framework.renderers.JSONRenderer needs to be first, at least until
# https://github.com/marcgibbons/django-rest-swagger/issues/271 is resolved.
'DEFAULT_RENDERER_CLASSES': (
'api.base.renderers.JSONAPIRenderer',
'api.base.renderers.JSONRendererWithESISupport',
'api.base.renderers.BrowsableAPIRendererNoForms',
),
'DEFAULT_PARSER_CLASSES': (
'api.base.parsers.JSONAPIParser',
'api.base.parsers.JSONAPIParserForRegularJSON',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'EXCEPTION_HANDLER': 'api.base.exceptions.json_api_exception_handler',
'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'api.base.content_negotiation.JSONAPIContentNegotiation',
'DEFAULT_VERSIONING_CLASS': 'api.base.versioning.BaseVersioning',
'DEFAULT_VERSION': '2.0',
'ALLOWED_VERSIONS': (
'2.0',
'2.1',
'2.2',
'2.3',
'2.4',
'2.5',
'2.6',
),
'DEFAULT_FILTER_BACKENDS': ('api.base.filters.ODMOrderingFilter',),
'DEFAULT_PAGINATION_CLASS': 'api.base.pagination.JSONAPIPagination',
'ORDERING_PARAM': 'sort',
'DEFAULT_AUTHENTICATION_CLASSES': (
# Custom auth classes
'api.base.authentication.drf.OSFBasicAuthentication',
'api.base.authentication.drf.OSFSessionAuthentication',
'api.base.authentication.drf.OSFCASAuthentication'
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'api.base.throttling.NonCookieAuthThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'user': '10000/day',
'non-cookie-auth': '100/hour',
'add-contributor': '10/second',
'create-guid': '1000/hour',
'root-anon-throttle': '1000/hour',
'test-user': '2/hour',
'test-anon': '1/hour',
}
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
# This needs to remain True to allow cross origin requests that are in CORS_ORIGIN_WHITELIST to
# use cookies.
CORS_ALLOW_CREDENTIALS = True
# Set dynamically on app init
ORIGINS_WHITELIST = ()
MIDDLEWARE_CLASSES = (
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
# A profiling middleware. ONLY FOR DEV USE
# Uncomment and add "prof" to url params to recieve a profile for that url
# 'api.base.middleware.ProfileMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'api.base.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True
}]
ROOT_URLCONF = 'api.base.urls'
WSGI_APPLICATION = 'api.base.wsgi.application'
LANGUAGE_CODE = 'en-us'
# Disabled to make a test work (TestNodeLog.test_formatted_date)
# TODO Try to understand what's happening to cause the test to break when that line is active.
# TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static/vendor')
API_BASE = 'v2/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('rest_framework_swagger/css', os.path.join(BASE_DIR, 'static/css')),
('rest_framework_swagger/images', os.path.join(BASE_DIR, 'static/images')),
)
# TODO: Revisit methods for excluding private routes from swagger docs
SWAGGER_SETTINGS = {
'api_path': '/',
'info': {
'description':
"""
Welcome to the fine documentation for the Open Science Framework's API! Please click
on the <strong>GET /v2/</strong> link below to get started.
For the most recent docs, please check out our <a href="/v2/">Browsable API</a>.
""",
'title': 'OSF APIv2 Documentation',
},
'doc_expansion': 'list',
'exclude_namespaces': ['applications', 'tokens', 'test'],
}
NODE_CATEGORY_MAP = osf_settings.NODE_CATEGORY_MAP
DEBUG_TRANSACTIONS = DEBUG
JWT_SECRET = 'osf_api_cas_login_jwt_secret_32b'
JWE_SECRET = 'osf_api_cas_login_jwe_secret_32b'
ENABLE_VARNISH = osf_settings.ENABLE_VARNISH
ENABLE_ESI = osf_settings.ENABLE_ESI
VARNISH_SERVERS = osf_settings.VARNISH_SERVERS
ESI_MEDIA_TYPES = osf_settings.ESI_MEDIA_TYPES
ADDONS_FOLDER_CONFIGURABLE = ['box', 'dropbox', 's3', 'googledrive', 'figshare', 'owncloud']
ADDONS_OAUTH = ADDONS_FOLDER_CONFIGURABLE + ['dataverse', 'github', 'bitbucket', 'mendeley', 'zotero', 'forward']
BYPASS_THROTTLE_TOKEN = 'test-token'
OSF_SHELL_USER_IMPORTS = None
# Settings for use in the admin
OSF_URL = 'https://osf.io'
|
apache-2.0
| 4,021,562,499,466,621,400
| 29.458621
| 113
| 0.681535
| false
| 3.323175
| true
| false
| false
|
mozilla/stoneridge
|
wpr/httparchive.py
|
1
|
25649
|
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View and edit HTTP Archives.
To list all URLs in an archive:
$ ./httparchive.py ls archive.wpr
To view the content of all URLs from example.com:
$ ./httparchive.py cat --host example.com archive.wpr
To view the content of a particular URL:
$ ./httparchive.py cat --host www.example.com --path /foo archive.wpr
To view the content of all URLs:
$ ./httparchive.py cat archive.wpr
To edit a particular URL:
$ ./httparchive.py edit --host www.example.com --path /foo archive.wpr
"""
import difflib
import email.utils
import httplib
import httpzlib
import json
import logging
import optparse
import os
import persistentmixin
import StringIO
import subprocess
import sys
import tempfile
import urlparse
import platformsettings
class HttpArchiveException(Exception):
"""Base class for all exceptions in httparchive."""
pass
class HttpArchive(dict, persistentmixin.PersistentMixin):
"""Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values.
PersistentMixin adds CreateNew(filename), Load(filename), and Persist().
Attributes:
server_rtt: dict of {hostname, server rtt in milliseconds}
"""
def __init__(self):
self.server_rtt = {}
def get_server_rtt(self, server):
"""Retrieves the round trip time (rtt) to the server
Args:
server: the hostname of the server
Returns:
round trip time to the server in seconds, or 0 if unavailable
"""
if server not in self.server_rtt:
platform_settings = platformsettings.get_platform_settings()
self.server_rtt[server] = platform_settings.ping(server)
return self.server_rtt[server]
def get(self, request, default=None):
"""Return the archived response for a given request.
Does extra checking for handling some HTTP request headers.
Args:
request: instance of ArchivedHttpRequest
default: default value to return if request is not found
Returns:
Instance of ArchivedHttpResponse or default if no matching
response is found
"""
if request in self:
return self[request]
return self.get_conditional_response(request, default)
def get_conditional_response(self, request, default):
"""Get the response based on the conditional HTTP request headers.
Args:
request: an ArchivedHttpRequest representing the original request.
      default: the default ArchivedHttpResponse to return when no response
        matches the original request with its conditional headers removed.
Returns:
      an ArchivedHttpResponse with a status of 200, 304 (not modified), or
412 (precondition failed)
"""
response = default
if request.is_conditional():
stripped_request = request.create_request_without_conditions()
if stripped_request in self:
response = self[stripped_request]
if response.status == 200:
status = self.get_conditional_status(request, response)
if status != 200:
response = create_response(status)
return response
def get_conditional_status(self, request, response):
status = 200
last_modified = email.utils.parsedate(
response.get_header_case_insensitive('last-modified'))
response_etag = response.get_header_case_insensitive('etag')
is_get_or_head = request.command.upper() in ('GET', 'HEAD')
match_value = request.headers.get('if-match', None)
if match_value:
if self.is_etag_match(match_value, response_etag):
status = 200
else:
status = 412 # precondition failed
none_match_value = request.headers.get('if-none-match', None)
if none_match_value:
if self.is_etag_match(none_match_value, response_etag):
status = 304
elif is_get_or_head:
status = 200
else:
status = 412
if is_get_or_head and last_modified:
for header in ('if-modified-since', 'if-unmodified-since'):
date = email.utils.parsedate(request.headers.get(header, None))
if date:
if ((header == 'if-modified-since' and last_modified > date) or
(header == 'if-unmodified-since' and last_modified < date)):
if status != 412:
status = 200
else:
status = 304 # not modified
return status
def is_etag_match(self, request_etag, response_etag):
"""Determines whether the entity tags of the request/response matches.
Args:
request_etag: the value string of the "if-(none)-match:"
portion of the request header
response_etag: the etag value of the response
Returns:
True on match, False otherwise
"""
response_etag = response_etag.strip('" ')
for etag in request_etag.split(','):
etag = etag.strip('" ')
if etag in ('*', response_etag):
return True
return False
def get_requests(self, command=None, host=None, path=None, use_query=True):
"""Return a list of requests that match the given args."""
return [r for r in self if r.matches(command, host, path,
use_query=use_query)]
def ls(self, command=None, host=None, path=None):
"""List all URLs that match given params."""
return ''.join(sorted(
'%s\n' % r for r in self.get_requests(command, host, path)))
def cat(self, command=None, host=None, path=None):
"""Print the contents of all URLs that match given params."""
out = StringIO.StringIO()
for request in self.get_requests(command, host, path):
print >>out, str(request)
print >>out, 'Untrimmed request headers:'
for k in request.headers:
print >>out, ' %s: %s' % (k, request.headers[k])
if request.request_body:
print >>out, request.request_body
print >>out, '---- Response Info', '-' * 51
response = self[request]
chunk_lengths = [len(x) for x in response.response_data]
print >>out, ('Status: %s\n'
'Reason: %s\n'
'Headers delay: %s\n'
'Response headers:') % (
response.status, response.reason, response.delays['headers'])
for k, v in response.headers:
print >>out, ' %s: %s' % (k, v)
print >>out, ('Chunk count: %s\n'
'Chunk lengths: %s\n'
'Chunk delays: %s') % (
len(chunk_lengths), chunk_lengths, response.delays['data'])
body = response.get_data_as_text()
print >>out, '---- Response Data', '-' * 51
if body:
print >>out, body
else:
print >>out, '[binary data]'
print >>out, '=' * 70
return out.getvalue()
def edit(self, command=None, host=None, path=None):
"""Edits the single request which matches given params."""
editor = os.getenv('EDITOR')
if not editor:
print 'You must set the EDITOR environmental variable.'
return
matching_requests = self.get_requests(command, host, path)
if not matching_requests:
print 'Failed to find any requests matching given command, host, path.'
return
if len(matching_requests) > 1:
print 'Found multiple matching requests. Please refine.'
print self.ls(command, host, path)
response = self[matching_requests[0]]
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(response.get_response_as_text())
tmp_file.close()
subprocess.check_call([editor, tmp_file.name])
response.set_response_from_text(''.join(open(tmp_file.name).readlines()))
os.remove(tmp_file.name)
def _format_request_lines(self, req):
"""Format request to make diffs easier to read.
Args:
req: an ArchivedHttpRequest
Returns:
Example:
['GET www.example.com/path\n', 'Header-Key: header value\n', ...]
"""
parts = ['%s %s%s\n' % (req.command, req.host, req.path)]
if req.request_body:
parts.append('%s\n' % req.request_body)
for k, v in req.trimmed_headers:
k = '-'.join(x.capitalize() for x in k.split('-'))
parts.append('%s: %s\n' % (k, v))
return parts
def find_closest_request(self, request, use_path=False):
"""Find the closest matching request in the archive to the given request.
Args:
request: an ArchivedHttpRequest
use_path: If True, closest matching request's path component must match.
(Note: this refers to the 'path' component within the URL, not the
query string component.)
If use_path=False, candidate will NOT match in example below
e.g. request = GET www.test.com/path?aaa
candidate = GET www.test.com/diffpath?aaa
Returns:
If a close match is found, return the instance of ArchivedHttpRequest.
Otherwise, return None.
"""
best_match = None
request_lines = self._format_request_lines(request)
matcher = difflib.SequenceMatcher(b=''.join(request_lines))
path = None
if use_path:
path = request.path
for candidate in self.get_requests(request.command, request.host, path,
use_query=not use_path):
candidate_lines = self._format_request_lines(candidate)
matcher.set_seq1(''.join(candidate_lines))
best_match = max(best_match, (matcher.ratio(), candidate))
if best_match:
return best_match[1]
return None
def diff(self, request):
"""Diff the given request to the closest matching request in the archive.
Args:
request: an ArchivedHttpRequest
Returns:
If a close match is found, return a textual diff between the requests.
Otherwise, return None.
"""
request_lines = self._format_request_lines(request)
closest_request = self.find_closest_request(request)
if closest_request:
closest_request_lines = self._format_request_lines(closest_request)
return ''.join(difflib.ndiff(closest_request_lines, request_lines))
return None
class ArchivedHttpRequest(object):
"""Record all the state that goes into a request.
ArchivedHttpRequest instances are considered immutable so they can
serve as keys for HttpArchive instances.
(The immutability is not enforced.)
Upon creation, the headers are "trimmed" (i.e. edited or dropped)
and saved to self.trimmed_headers to allow requests to match in a wider
variety of playback situations (e.g. using different user agents).
For unpickling, 'trimmed_headers' is recreated from 'headers'. That
allows for changes to the trim function and can help with debugging.
"""
CONDITIONAL_HEADERS = [
'if-none-match', 'if-match',
'if-modified-since', 'if-unmodified-since']
def __init__(self, command, host, path, request_body, headers, is_ssl=False):
"""Initialize an ArchivedHttpRequest.
Args:
command: a string (e.g. 'GET' or 'POST').
host: a host name (e.g. 'www.google.com').
path: a request path (e.g. '/search?q=dogs').
request_body: a request body string for a POST or None.
headers: {key: value, ...} where key and value are strings.
      is_ssl: a boolean which is True iff request is made via SSL.
"""
self.command = command
self.host = host
self.path = path
self.request_body = request_body
self.headers = headers
self.is_ssl = is_ssl
self.trimmed_headers = self._TrimHeaders(headers)
def __str__(self):
scheme = 'https' if self.is_ssl else 'http'
return '%s %s://%s%s %s' % (
self.command, scheme, self.host, self.path, self.trimmed_headers)
def __repr__(self):
return repr((self.command, self.host, self.path, self.request_body,
self.trimmed_headers, self.is_ssl))
def __hash__(self):
"""Return a integer hash to use for hashed collections including dict."""
return hash(repr(self))
def __eq__(self, other):
"""Define the __eq__ method to match the hash behavior."""
return repr(self) == repr(other)
def __setstate__(self, state):
"""Influence how to unpickle.
"headers" are the original request headers.
"trimmed_headers" are the trimmed headers used for matching requests
during replay.
Args:
state: a dictionary for __dict__
"""
if 'full_headers' in state:
# Fix older version of archive.
state['headers'] = state['full_headers']
del state['full_headers']
if 'headers' not in state:
raise HttpArchiveException(
'Archived HTTP request is missing "headers". The HTTP archive is'
' likely from a previous version and must be re-recorded.')
state['trimmed_headers'] = self._TrimHeaders(dict(state['headers']))
if 'is_ssl' not in state:
state['is_ssl'] = False
self.__dict__.update(state)
def __getstate__(self):
"""Influence how to pickle.
Returns:
a dict to use for pickling
"""
state = self.__dict__.copy()
del state['trimmed_headers']
return state
def matches(self, command=None, host=None, path_with_query=None,
use_query=True):
"""Returns true iff the request matches all parameters.
Args:
command: a string (e.g. 'GET' or 'POST').
host: a host name (e.g. 'www.google.com').
path_with_query: a request path with query string (e.g. '/search?q=dogs')
use_query:
If use_query is True, request matching uses both the hierarchical path
and query string component.
If use_query is False, request matching only uses the hierarchical path
e.g. req1 = GET www.test.com/index?aaaa
req2 = GET www.test.com/index?bbbb
If use_query is True, req1.matches(req2) evaluates to False
If use_query is False, req1.matches(req2) evaluates to True
Returns:
True iff the request matches all parameters
"""
path_match = path_with_query == self.path
if not use_query:
self_path = urlparse.urlparse('http://%s%s' % (
self.host or '', self.path or '')).path
other_path = urlparse.urlparse('http://%s%s' % (
host or '', path_with_query or '')).path
path_match = self_path == other_path
return ((command is None or command == self.command) and
(host is None or host == self.host) and
(path_with_query is None or path_match))
@classmethod
def _TrimHeaders(cls, headers):
"""Removes headers that are known to cause problems during replay.
These headers are removed for the following reasons:
- accept: Causes problems with www.bing.com. During record, CSS is fetched
with *. During replay, it's text/css.
- accept-charset, accept-language, referer: vary between clients.
- connection, method, scheme, url, version: Cause problems with spdy.
- cookie: Extremely sensitive to request/response order.
- keep-alive: Not supported by Web Page Replay.
- user-agent: Changes with every Chrome version.
- proxy-connection: Sent for proxy requests.
Another variant to consider is dropping only the value from the header.
However, this is particularly bad for the cookie header, because the
presence of the cookie depends on the responses we've seen when the request
is made.
Args:
headers: {header_key: header_value, ...}
Returns:
[(header_key, header_value), ...] # (with undesirable headers removed)
"""
# TODO(tonyg): Strip sdch from the request headers because we can't
# guarantee that the dictionary will be recorded, so replay may not work.
if 'accept-encoding' in headers:
headers['accept-encoding'] = headers['accept-encoding'].replace(
'sdch', '')
# A little clean-up
if headers['accept-encoding'].endswith(','):
headers['accept-encoding'] = headers['accept-encoding'][:-1]
undesirable_keys = [
'accept', 'accept-charset', 'accept-language',
'connection', 'cookie', 'keep-alive', 'method',
'referer', 'scheme', 'url', 'version', 'user-agent', 'proxy-connection']
return sorted([(k, v) for k, v in headers.items()
if k.lower() not in undesirable_keys])
def is_conditional(self):
"""Return list of headers that match conditional headers."""
for header in self.CONDITIONAL_HEADERS:
if header in self.headers:
return True
return False
def create_request_without_conditions(self):
stripped_headers = dict((k, v) for k, v in self.headers.iteritems()
if k.lower() not in self.CONDITIONAL_HEADERS)
return ArchivedHttpRequest(
self.command, self.host, self.path, self.request_body,
stripped_headers, self.is_ssl)
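def _example_matches_ignoring_query():
  # Illustrative sketch: two requests that differ only in the query string match
  # when use_query=False but not when use_query=True. Values are hypothetical.
  req = ArchivedHttpRequest('GET', 'www.test.com', '/index?aaaa', None, {})
  ignoring_query = req.matches('GET', 'www.test.com', '/index?bbbb',
                               use_query=False)
  exact = req.matches('GET', 'www.test.com', '/index?bbbb', use_query=True)
  return ignoring_query, exact  # (True, False)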
class ArchivedHttpResponse(object):
"""All the data needed to recreate all HTTP response."""
# CHUNK_EDIT_SEPARATOR is used to edit and view text content.
# It is not sent in responses. It is added by get_data_as_text()
# and removed by set_data().
CHUNK_EDIT_SEPARATOR = '[WEB_PAGE_REPLAY_CHUNK_BOUNDARY]'
# DELAY_EDIT_SEPARATOR is used to edit and view server delays.
DELAY_EDIT_SEPARATOR = ('\n[WEB_PAGE_REPLAY_EDIT_ARCHIVE --- '
'Delays are above. Response content is below.]\n')
def __init__(self, version, status, reason, headers, response_data,
delays=None):
"""Initialize an ArchivedHttpResponse.
Args:
version: HTTP protocol version used by server.
10 for HTTP/1.0, 11 for HTTP/1.1 (same as httplib).
status: Status code returned by server (e.g. 200).
reason: Reason phrase returned by server (e.g. "OK").
headers: list of (header, value) tuples.
response_data: list of content chunks.
Concatenating the chunks gives the complete contents
(i.e. the chunks do not have any lengths or delimiters).
Do not include the final, zero-length chunk that marks the end.
delays: dict of (ms) delays before "headers" and "data". For example,
{'headers': 50, 'data': [0, 10, 10]}
"""
self.version = version
self.status = status
self.reason = reason
self.headers = headers
self.response_data = response_data
self.delays = delays
self.fix_delays()
def fix_delays(self):
"""Initialize delays, or check the number of data delays."""
expected_num_delays = len(self.response_data)
if not self.delays:
self.delays = {
'headers': 0,
'data': [0] * expected_num_delays
}
else:
num_delays = len(self.delays['data'])
if num_delays != expected_num_delays:
raise HttpArchiveException(
'Server delay length mismatch: %d (expected %d): %s',
num_delays, expected_num_delays, self.delays['data'])
def __repr__(self):
return repr((self.version, self.status, self.reason, sorted(self.headers),
self.response_data))
def __hash__(self):
"""Return a integer hash to use for hashed collections including dict."""
return hash(repr(self))
def __eq__(self, other):
"""Define the __eq__ method to match the hash behavior."""
return repr(self) == repr(other)
def __setstate__(self, state):
"""Influence how to unpickle.
Args:
state: a dictionary for __dict__
"""
if 'server_delays' in state:
state['delays'] = {
'headers': 0,
'data': state['server_delays']
}
del state['server_delays']
elif 'delays' not in state:
state['delays'] = None
self.__dict__.update(state)
self.fix_delays()
def get_header(self, key, default=None):
for k, v in self.headers:
if key == k:
return v
return default
def get_header_case_insensitive(self, key):
for k, v in self.headers:
if key.lower() == k.lower():
return v
return None
def set_header(self, key, value):
for i, (k, v) in enumerate(self.headers):
if key == k:
self.headers[i] = (key, value)
return
self.headers.append((key, value))
def remove_header(self, key):
for i, (k, v) in enumerate(self.headers):
if key == k:
self.headers.pop(i)
return
def is_gzip(self):
return self.get_header('content-encoding') == 'gzip'
def is_compressed(self):
return self.get_header('content-encoding') in ('gzip', 'deflate')
def is_chunked(self):
return self.get_header('transfer-encoding') == 'chunked'
def get_data_as_text(self):
"""Return content as a single string.
Uncompresses and concatenates chunks with CHUNK_EDIT_SEPARATOR.
"""
content_type = self.get_header('content-type')
if (not content_type or
not (content_type.startswith('text/') or
content_type == 'application/x-javascript')):
return None
if self.is_compressed():
uncompressed_chunks = httpzlib.uncompress_chunks(
self.response_data, self.is_gzip())
else:
uncompressed_chunks = self.response_data
return self.CHUNK_EDIT_SEPARATOR.join(uncompressed_chunks)
def get_delays_as_text(self):
"""Return delays as editable text."""
return json.dumps(self.delays, indent=2)
def get_response_as_text(self):
"""Returns response content as a single string.
Server delays are separated on a per-chunk basis. Delays are in seconds.
Response content begins after DELAY_EDIT_SEPARATOR
"""
data = self.get_data_as_text()
if data is None:
logging.warning('Data can not be represented as text.')
data = ''
delays = self.get_delays_as_text()
return self.DELAY_EDIT_SEPARATOR.join((delays, data))
def set_data(self, text):
"""Inverse of get_data_as_text().
Split on CHUNK_EDIT_SEPARATOR and compress if needed.
"""
text_chunks = text.split(self.CHUNK_EDIT_SEPARATOR)
if self.is_compressed():
self.response_data = httpzlib.compress_chunks(text_chunks, self.is_gzip())
else:
self.response_data = text_chunks
if not self.is_chunked():
content_length = sum(len(c) for c in self.response_data)
self.set_header('content-length', str(content_length))
def set_delays(self, delays_text):
"""Inverse of get_delays_as_text().
Args:
delays_text: JSON encoded text such as the following:
{
headers: 80,
data: [6, 55, 0]
}
Times are in milliseconds.
Each data delay corresponds with one response_data value.
"""
try:
self.delays = json.loads(delays_text)
except (ValueError, KeyError) as e:
logging.critical('Unable to parse delays %s: %s', delays_text, e)
self.fix_delays()
def set_response_from_text(self, text):
"""Inverse of get_response_as_text().
Modifies the state of the archive according to the textual representation.
"""
try:
delays, data = text.split(self.DELAY_EDIT_SEPARATOR)
except ValueError:
logging.critical(
'Error parsing text representation. Skipping edits.')
return
self.set_delays(delays)
self.set_data(data)
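def _example_response_text_roundtrip():
  # Illustrative sketch: text content is flattened into a single editable string
  # with CHUNK_EDIT_SEPARATOR and split back into chunks by set_data(). The
  # response below is hypothetical.
  response = ArchivedHttpResponse(
      11, 200, 'OK', [('content-type', 'text/plain')], ['hello ', 'world'])
  text = response.get_data_as_text()  # chunks joined by CHUNK_EDIT_SEPARATOR
  response.set_data(text)             # splits back into ['hello ', 'world']
  return response.response_data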
def create_response(status, reason=None, headers=None, body=None):
"""Convenience method for creating simple ArchivedHttpResponse objects."""
if reason is None:
reason = httplib.responses.get(status, 'Unknown')
if headers is None:
headers = [('content-type', 'text/plain')]
if body is None:
body = "%s %s" % (status, reason)
return ArchivedHttpResponse(11, status, reason, headers, [body])
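def _example_conditional_get():
  # Illustrative sketch: once a 200 response with an ETag is archived, a request
  # carrying a matching if-none-match header is answered with a generated 304.
  # All values are hypothetical.
  archive = HttpArchive()
  request = ArchivedHttpRequest('GET', 'www.example.com', '/', None, {})
  archive[request] = create_response(
      200, headers=[('content-type', 'text/plain'), ('etag', '"abc"')],
      body='hello')
  conditional = ArchivedHttpRequest(
      'GET', 'www.example.com', '/', None, {'if-none-match': '"abc"'})
  return archive.get(conditional).status  # 304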
def main():
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
if description:
return description + '\n'
else:
return ''
option_parser = optparse.OptionParser(
usage='%prog [ls|cat|edit] [options] replay_file',
formatter=PlainHelpFormatter(),
description=__doc__,
epilog='http://code.google.com/p/web-page-replay/')
option_parser.add_option('-c', '--command', default=None,
action='store',
type='string',
help='Only show URLs matching this command.')
option_parser.add_option('-o', '--host', default=None,
action='store',
type='string',
help='Only show URLs matching this host.')
option_parser.add_option('-p', '--path', default=None,
action='store',
type='string',
help='Only show URLs matching this path.')
options, args = option_parser.parse_args()
if len(args) != 2:
print 'args: %s' % args
option_parser.error('Must specify a command and replay_file')
command = args[0]
replay_file = args[1]
if not os.path.exists(replay_file):
option_parser.error('Replay file "%s" does not exist' % replay_file)
http_archive = HttpArchive.Load(replay_file)
if command == 'ls':
print http_archive.ls(options.command, options.host, options.path)
elif command == 'cat':
print http_archive.cat(options.command, options.host, options.path)
elif command == 'edit':
http_archive.edit(options.command, options.host, options.path)
http_archive.Persist(replay_file)
else:
option_parser.error('Unknown command "%s"' % command)
return 0
if __name__ == '__main__':
sys.exit(main())
|
mpl-2.0
| 3,875,306,886,947,456,500
| 33.707713
| 80
| 0.644859
| false
| 3.841396
| false
| false
| false
|
georgistanev/django-dash
|
src/dash/contrib/plugins/rss_feed/dash_plugins.py
|
1
|
1179
|
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('BaseReadRSSFeedPlugin',)
from django.utils.translation import ugettext_lazy as _
from dash.base import BaseDashboardPlugin
from dash.factory import plugin_factory
from dash.contrib.plugins.rss_feed.forms import ReadRSSFeedForm
# ********************************************************************************
# ********************************* Base Read RSS feed plugin ********************
# ********************************************************************************
class BaseReadRSSFeedPlugin(BaseDashboardPlugin):
"""
    Base plugin that reads an RSS feed and renders it into HTML.
"""
name = _("Read RSS feed")
form = ReadRSSFeedForm
group = _("Internet")
# ********************************************************************************
# ********** Generating and registering the plugins using factory ****************
# ********************************************************************************
sizes = (
(2, 3),
(3, 3),
)
plugin_factory(BaseReadRSSFeedPlugin, 'read_rss_feed', sizes)
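# Note: plugin_factory is expected to generate and register one concrete plugin
# class per (cols, rows) pair above, so the base plugin becomes available as a
# 2x3 and a 3x3 widget.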
|
gpl-2.0
| 3,955,246,857,589,229,600
| 35.84375
| 82
| 0.46056
| false
| 4.465909
| false
| false
| false
|
cleobulo/site-mapp2
|
crawl/linkanalyzer.py
|
1
|
4873
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#author Cleóbulo Bezerra < cleobulo.oliveira@gmail.com >
#file Defines a set of procedures to verify the authenticity of the urls
from re import match, split, sub, search
from Queue import Queue
from urlparse import urljoin, urlparse
def init():
"""
Initializes the global variables required for build
a links parser. The goal of this parser should be to
verify that a URL is authentic.
1 - Verify the url syntax and classify them as relative or absolute;
2 - If possible, authenticate these URLs considered valid;
3 - Filter URLs, that is, maintain only those that lead to html pages.
"""
#Reserved Characters
global GEN_DELIMS
global SUB_DELIMS
global RESERVED
GEN_DELIMS = r"[/:\?#\[\]@]"
SUB_DELIMS = r"[!\$&\+\*,;`\(\)]"
RESERVED = GEN_DELIMS + "|" + SUB_DELIMS
#Unreserved Characters
global UNRESERVED
UNRESERVED = r"[\w\.-_~]"
#PATH (sub) components
global PCHAR
global SEGMENT
global SEGMENT_NZ
global SEGMENT_NZ_NC
PCHAR = r"(" + UNRESERVED + "|" + SUB_DELIMS + "|:|@)"
SEGMENT = PCHAR + "*"
SEGMENT_NZ = PCHAR + "+"
SEGMENT_NZ_NC = r"(" + UNRESERVED + "|" + SUB_DELIMS + "|@)+"
global PATH_ABEMPTY
global PATH_ABSOLUTE
global PATH_NOSCHEME
global PATH_ROOTLESS
PATH_ABEMPTY = r"(/" + SEGMENT + ")*"
PATH_ABSOLUTE = r"/(" + SEGMENT_NZ + "(/" + SEGMENT + ")*)"
PATH_NOSCHEME = SEGMENT_NZ_NC + "(/" + SEGMENT + ")*"
PATH_ROOTLESS = SEGMENT_NZ + "(/" + SEGMENT + ")*"
#The three main components of the syntactic structure of a URL.
global SCHEME
global HOST
global PATH
SCHEME = r"http(s)?:"
HOST = r"//(" + UNRESERVED + "|" + SUB_DELIMS + ")*"
PATH = r"(" + PATH_ABEMPTY + "|" + PATH_ABSOLUTE + "|" + PATH_NOSCHEME + "|" + PATH_ROOTLESS + "|)"
class _Token:
"""
This class represents each component of a URL syntax.
"""
def __init__(self, token):
self.__token = token
@property
def token(self):
return self.__token
class _UrlSyntaxTree:
"""
    Represents a URL Syntax Tree for URL analysis. The goal is to try to
    correct or authenticate the URLs collected on the Web by some program or
    entered by a user.
"""
def __init__(self):
self.__leftchild = None
self.__middlechild = None
self.__rightchild = None
def __urlsplit(self, url):
return split('([^\?#:/]+:|//[^\?#/]*)', url)
def build_absolute_url(self, url):
urlcomp = self.__urlsplit(url)
queuecomp = Queue()
for comp in urlcomp:
if comp != '':
queuecomp.put(_Token(comp))
while not queuecomp.empty():
currcomp = queuecomp.get()
if match(SCHEME, currcomp.token):
self.__leftchild = currcomp
elif match(HOST, currcomp.token):
self.__middlechild = currcomp
elif match(PATH, currcomp.token):
self.build_relative_url(currcomp.token)
def get_absolute_url(self):
if self.__leftchild != None and self.__middlechild != None and self.__rightchild != None:
return self.__leftchild.token + self.__middlechild.token + self.__rightchild.token
elif self.__leftchild != None and self.__middlechild != None:
return self.__leftchild.token + sub('(/|)$', '/', self.__middlechild.token)
else:
return None
def build_relative_url(self, path):
urlcomp = _Token(path)
if match(PATH, urlcomp.token):
self.__rightchild = urlcomp
def get_relative_url(self):
if self.get_absolute_url() == None and self.__rightchild != None:
return self.__rightchild.token
else:
return None
class LinkAnalyzer:
"""
Represents an object for URL analysis. This object seeks to
perform the syntax analysis and the filtering of these URLs.
"""
def __init__(self):
init()
def urleval(self, curl, furl = '/'):
self.__urltree = _UrlSyntaxTree()
url = curl if furl == '/' else furl
if match(SCHEME + HOST, url):
self.__urltree.build_absolute_url(url)
return self.__urltree.get_absolute_url()
elif match(PATH, url):
self.__urltree.build_relative_url(url)
return urljoin(curl, self.__urltree.get_relative_url())
return None
def urlfilter(self, url):
if search('^http(s)?://[^\?#/]+$', url):
return True
elif search('(/[^\?#\.:/]*)$', url):
return True
elif search('(\.html|\.htm|\.php|\.asp|\.jsp)$', url):
return True
else:
return False
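def _example_usage():
    """
    Illustrative sketch: resolve a relative link against a page URL and
    filter out a non-HTML target. All URLs here are hypothetical examples.
    """
    analyzer = LinkAnalyzer()
    page = 'http://www.example.com/index.html'
    absolute = analyzer.urleval(page, '/about.html')
    # absolute == 'http://www.example.com/about.html'
    is_page = analyzer.urlfilter('http://www.example.com/logo.png')
    # is_page == False, because image targets are filtered out
    return absolute, is_page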
|
mit
| 4,591,837,217,795,273,000
| 27.828402
| 103
| 0.562192
| false
| 3.721925
| false
| false
| false
|
nojhan/weboob-devel
|
modules/cuisineaz/pages.py
|
1
|
4451
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import Recipe, Comment
from weboob.capabilities.base import NotAvailable
from weboob.browser.pages import HTMLPage, pagination
from weboob.browser.elements import ItemElement, method, ListElement
from weboob.browser.filters.standard import CleanText, Regexp, Env, Time
from weboob.browser.filters.html import XPath, CleanHTML
import re
import datetime
class CuisineazDuration(Time):
klass = datetime.timedelta
_regexp = re.compile(r'((?P<hh>\d+) h)?((?P<mm>\d+) min)?(?P<ss>\d+)?')
kwargs = {'hours': 'hh', 'minutes': 'mm', 'seconds': 'ss'}
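# Illustrative sketch: the regexp above captures hour and minute groups that the
# Time filter turns into a timedelta. The sample strings below are hypothetical.
assert CuisineazDuration._regexp.match('2 h').group('hh') == '2'
assert CuisineazDuration._regexp.match('45 min').group('mm') == '45'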
class ResultsPage(HTMLPage):
""" Page which contains results as a list of recipies
"""
@pagination
@method
class iter_recipes(ListElement):
item_xpath = '//div[@id="divRecette"]'
def next_page(self):
next = CleanText('//li[@class="next"]/span/a/@href',
default=None)(self)
if next:
return next
class item(ItemElement):
klass = Recipe
def condition(self):
return Regexp(CleanText('./div[has-class("searchTitle")]/h2/a/@href'),
'/recettes/(.*).aspx',
default=None)(self.el)
obj_id = Regexp(CleanText('./div[has-class("searchTitle")]/h2/a/@href'),
'/recettes/(.*).aspx')
obj_title = CleanText('./div[has-class("searchTitle")]/h2/a')
obj_thumbnail_url = CleanText('./div[has-class("searchImg")]/span/img[@data-src!=""]/@data-src|./div[has-class("searchImg")]/div/span/img[@src!=""]/@src',
default=None)
obj_short_description = CleanText('./div[has-class("searchIngredients")]')
class RecipePage(HTMLPage):
""" Page which contains a recipe
"""
@method
class get_recipe(ItemElement):
klass = Recipe
obj_id = Env('_id')
obj_title = CleanText('//div[@id="ficheRecette"]/h1')
obj_picture_url = CleanText('//img[@id="shareimg" and @src!=""]/@src', default=None)
obj_thumbnail_url = CleanText('//img[@id="shareimg" and @src!=""]/@src', default=None)
def obj_preparation_time(self):
_prep = CuisineazDuration(CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteTempsPrepa"]'))(self)
return int(_prep.total_seconds() / 60)
def obj_cooking_time(self):
_cook = CuisineazDuration(CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteTempsCuisson"]'))(self)
return int(_cook.total_seconds() / 60)
def obj_nb_person(self):
nb_pers = CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteNombre"]')(self)
return [nb_pers] if nb_pers else NotAvailable
def obj_ingredients(self):
ingredients = []
for el in XPath('//div[@id="ingredients"]/ul/li')(self):
ingredients.append(CleanText('.')(el))
return ingredients
obj_instructions = CleanHTML('//div[@id="preparation"]/span[@class="instructions"]')
@method
class get_comments(ListElement):
item_xpath = '//div[@class="comment pb15 row"]'
class item(ItemElement):
klass = Comment
obj_author = CleanText('./div[has-class("comment-left")]/div/div/div[@class="fs18 txtcaz mb5 first-letter"]')
obj_text = CleanText('./div[has-class("comment-right")]/div/p')
obj_id = CleanText('./@id')
def obj_rate(self):
return len(XPath('./div[has-class("comment-right")]/div/div/div/span/span[@class="icon icon-star"]')(self))
|
agpl-3.0
| 3,232,280,561,110,571,500
| 36.720339
| 166
| 0.611773
| false
| 3.663374
| false
| false
| false
|
jhseu/tensorflow
|
tensorflow/python/debug/lib/source_utils.py
|
1
|
13967
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions that help to inspect Python source w.r.t. TF graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import zipfile
import absl
import numpy as np
from tensorflow.python.debug.lib import profiling
_TENSORFLOW_BASEDIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.normpath(os.path.abspath(__file__))))))
_ABSL_BASEDIR = os.path.dirname(absl.__file__)
UNCOMPILED_SOURCE_SUFFIXES = (".py",)
COMPILED_SOURCE_SUFFIXES = (".pyc", ".pyo")
def _norm_abs_path(file_path):
return os.path.normpath(os.path.abspath(file_path))
def is_extension_uncompiled_python_source(file_path):
_, extension = os.path.splitext(file_path)
return extension.lower() in UNCOMPILED_SOURCE_SUFFIXES
def is_extension_compiled_python_source(file_path):
_, extension = os.path.splitext(file_path)
return extension.lower() in COMPILED_SOURCE_SUFFIXES
def _convert_watch_key_to_tensor_name(watch_key):
return watch_key[:watch_key.rfind(":")]
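# Illustrative sketch: a debug watch key has the form
# "<node_name>:<output_slot>:<debug_op>"; dropping everything after the last
# colon yields the tensor name. The watch key below is a hypothetical example.
assert _convert_watch_key_to_tensor_name(
    "hidden/MatMul:0:DebugIdentity") == "hidden/MatMul:0"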
def guess_is_tensorflow_py_library(py_file_path):
"""Guess whether a Python source file is a part of the tensorflow library.
Special cases:
1) Returns False for unit-test files in the library (*_test.py),
2) Returns False for files under python/debug/examples.
Args:
py_file_path: full path of the Python source file in question.
Returns:
(`bool`) Whether the file is a part of the tensorflow library.
Raises:
ValueError: if the extension name of py_file_path does not indicate a Python
      source file (compiled or uncompiled).
"""
if (not is_extension_uncompiled_python_source(py_file_path) and
not is_extension_compiled_python_source(py_file_path)):
raise ValueError(
"Input file path (%s) is not a Python source file." % py_file_path)
py_file_path = _norm_abs_path(py_file_path)
return ((py_file_path.startswith(_TENSORFLOW_BASEDIR) or
py_file_path.startswith(_ABSL_BASEDIR)) and
not py_file_path.endswith("_test.py") and
(os.path.normpath("tensorflow/python/debug/examples") not in
os.path.normpath(py_file_path)))
def load_source(source_file_path):
"""Load the content of a Python source code file.
This function covers the following case:
1. source_file_path points to an existing Python (.py) file on the
file system.
2. source_file_path is a path within a .par file (i.e., a zip-compressed,
self-contained Python executable).
Args:
source_file_path: Path to the Python source file to read.
Returns:
A length-2 tuple:
- Lines of the source file, as a `list` of `str`s.
- The width of the string needed to show the line number in the file.
This is calculated based on the number of lines in the source file.
Raises:
IOError: if loading is unsuccessful.
"""
if os.path.isfile(source_file_path):
with open(source_file_path, "rb") as f:
source_text = f.read().decode("utf-8")
source_lines = source_text.split("\n")
else:
# One possible reason why the file doesn't exist is that it's a path
# inside a .par file. Try that possibility.
source_lines = _try_load_par_source(source_file_path)
if source_lines is None:
raise IOError(
"Source path neither exists nor can be loaded as a .par file: %s" %
source_file_path)
line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3
return source_lines, line_num_width
def _try_load_par_source(source_file_path):
"""Try loading the source code inside a .par file.
A .par file is a zip-compressed, self-contained Python executable.
It contains the content of individual Python source files that can
be read only through extracting from the zip file.
Args:
source_file_path: The full path to the file inside the .par file. This
path should include the path to the .par file itself, followed by the
intra-par path, e.g.,
"/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py".
Returns:
If successful, lines of the source file as a `list` of `str`s.
Else, `None`.
"""
prefix_path = source_file_path
while True:
prefix_path, basename = os.path.split(prefix_path)
if not basename:
break
suffix_path = os.path.normpath(
os.path.relpath(source_file_path, start=prefix_path))
if prefix_path.endswith(".par") and os.path.isfile(prefix_path):
with zipfile.ZipFile(prefix_path) as z:
norm_names = [os.path.normpath(name) for name in z.namelist()]
if suffix_path in norm_names:
with z.open(z.namelist()[norm_names.index(suffix_path)]) as zf:
source_text = zf.read().decode("utf-8")
return source_text.split("\n")
def annotate_source(dump,
source_file_path,
do_dumped_tensors=False,
file_stack_top=False,
min_line=None,
max_line=None):
"""Annotate a Python source file with a list of ops created at each line.
(The annotation doesn't change the source file itself.)
Args:
dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
has been loaded.
source_file_path: (`str`) Path to the source file being annotated.
    do_dumped_tensors: (`bool`) Whether dumped Tensors, instead of ops, are to be
used to annotate the source file.
file_stack_top: (`bool`) Whether only the top stack trace in the
specified source file is to be annotated.
    min_line: (`None` or `int`) The 1-based line to start annotating the source
file from (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
A `dict` mapping 1-based line number to a list of op name(s) created at
that line, or tensor names if `do_dumped_tensors` is True.
Raises:
ValueError: If the dump object does not have a Python graph set.
"""
py_graph = dump.python_graph
if not py_graph:
raise ValueError("Cannot perform source annotation due to a lack of set "
"Python graph in the dump object")
source_file_path = _norm_abs_path(source_file_path)
line_to_op_names = {}
for op in py_graph.get_operations():
for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):
if (min_line is not None and line_number < min_line or
max_line is not None and line_number >= max_line):
continue
if _norm_abs_path(file_path) != source_file_path:
continue
if do_dumped_tensors:
watch_keys = dump.debug_watch_keys(op.name)
# Convert watch keys to unique Tensor names.
items_to_append = list(
set(map(_convert_watch_key_to_tensor_name, watch_keys)))
else:
items_to_append = [op.name]
if line_number in line_to_op_names:
line_to_op_names[line_number].extend(items_to_append)
else:
line_to_op_names[line_number] = items_to_append
if file_stack_top:
break
return line_to_op_names
def list_source_files_against_dump(dump,
path_regex_whitelist=None,
node_name_regex_whitelist=None):
"""Generate a list of source files with information regarding ops and tensors.
Args:
dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
has been loaded.
path_regex_whitelist: A regular-expression filter for source file path.
node_name_regex_whitelist: A regular-expression filter for node names.
Returns:
A list of tuples regarding the Python source files involved in constructing
the ops and tensors contained in `dump`. Each tuple is:
(source_file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
first_line)
is_tf_library: (`bool`) A guess of whether the file belongs to the
TensorFlow Python library.
num_nodes: How many nodes were created by lines of this source file.
These include nodes with dumps and those without.
num_tensors: How many Tensors were created by lines of this source file.
These include Tensors with dumps and those without.
num_dumps: How many debug Tensor dumps were from nodes (and Tensors)
that were created by this source file.
first_line: The first line number (1-based) that created any nodes or
Tensors in this source file.
The list is sorted by ascending order of source_file_path.
Raises:
ValueError: If the dump object does not have a Python graph set.
"""
py_graph = dump.python_graph
if not py_graph:
raise ValueError("Cannot generate source list due to a lack of set "
"Python graph in the dump object")
path_to_node_names = collections.defaultdict(set)
path_to_tensor_names = collections.defaultdict(set)
path_to_first_line = {}
tensor_name_to_num_dumps = {}
path_regex = (re.compile(path_regex_whitelist)
if path_regex_whitelist else None)
node_name_regex = (re.compile(node_name_regex_whitelist)
if node_name_regex_whitelist else None)
to_skip_file_paths = set()
for op in py_graph.get_operations():
if node_name_regex and not node_name_regex.match(op.name):
continue
for file_path, line_number, _, _ in dump.node_traceback(op.name):
file_path = _norm_abs_path(file_path)
if (file_path in to_skip_file_paths or
path_regex and not path_regex.match(file_path) or
not os.path.isfile(file_path)):
to_skip_file_paths.add(file_path)
continue
path_to_node_names[file_path].add(op.name)
if file_path in path_to_first_line:
if path_to_first_line[file_path] > line_number:
path_to_first_line[file_path] = line_number
else:
path_to_first_line[file_path] = line_number
for output_tensor in op.outputs:
tensor_name = output_tensor.name
path_to_tensor_names[file_path].add(tensor_name)
watch_keys = dump.debug_watch_keys(op.name)
for watch_key in watch_keys:
node_name, output_slot, debug_op = watch_key.split(":")
tensor_name = "%s:%s" % (node_name, output_slot)
if tensor_name not in tensor_name_to_num_dumps:
tensor_name_to_num_dumps[tensor_name] = len(
dump.get_tensors(node_name, int(output_slot), debug_op))
path_to_num_dumps = {}
for path in path_to_tensor_names:
path_to_num_dumps[path] = sum(
tensor_name_to_num_dumps.get(tensor_name, 0)
for tensor_name in path_to_tensor_names[path])
output = []
for file_path in path_to_node_names:
output.append((
file_path,
guess_is_tensorflow_py_library(file_path),
len(path_to_node_names.get(file_path, {})),
len(path_to_tensor_names.get(file_path, {})),
path_to_num_dumps.get(file_path, 0),
path_to_first_line[file_path]))
return sorted(output, key=lambda x: x[0])
def annotate_source_against_profile(profile_data,
source_file_path,
node_name_filter=None,
op_type_filter=None,
min_line=None,
max_line=None):
"""Annotate a Python source file with profiling information at each line.
(The annotation doesn't change the source file itself.)
Args:
profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
source_file_path: (`str`) Path to the source file being annotated.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
    min_line: (`None` or `int`) The 1-based line to start annotating the source
file from (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
    A `dict` mapping 1-based line number to the namedtuple
`profiling.LineOrFuncProfileSummary`.
"""
source_file_path = _norm_abs_path(source_file_path)
node_name_regex = re.compile(node_name_filter) if node_name_filter else None
op_type_regex = re.compile(op_type_filter) if op_type_filter else None
line_to_profile_summary = {}
for profile_datum in profile_data:
if not profile_datum.file_path:
continue
if _norm_abs_path(profile_datum.file_path) != source_file_path:
continue
if (min_line is not None and profile_datum.line_number < min_line or
max_line is not None and profile_datum.line_number >= max_line):
continue
if (node_name_regex and
not node_name_regex.match(profile_datum.node_exec_stats.node_name)):
continue
if op_type_regex and not op_type_regex.match(profile_datum.op_type):
continue
if profile_datum.line_number not in line_to_profile_summary:
line_to_profile_summary[profile_datum.line_number] = (
profiling.AggregateProfile(profile_datum))
else:
line_to_profile_summary[profile_datum.line_number].add(profile_datum)
return line_to_profile_summary
|
apache-2.0
| 5,331,100,872,048,130,000
| 35.467363
| 80
| 0.658695
| false
| 3.690092
| false
| false
| false
|
markreidvfx/pyaaf_old
|
docs/parse_aaf_header.py
|
1
|
2348
|
import pickle
def parse_aaf_header(header_path, dest_path=None):
if not dest_path:
dest_path = 'docs.pkl'
f = open(header_path, 'r')
header = f.read()
f.close()
comments = ""
interface = {}
current = None
for line in header.splitlines():
if line.count("//"):
if line.count("// Interface"):
current = line.replace("// Interface",'').strip()
current = current.replace('IEnumAAF','AxIter.').replace("IAAF",'Ax')
if current:
if not interface.has_key(current):
interface[current] = ""
interface[current] += line
interface[current] += '\n'
doc_dict = {}
for item, value in sorted(interface.items()):
for i in value.split("//***********************************************************"):
lines = i.splitlines()
method = None
try:
line2 = lines[2]
if line2.count("("):
method = line2.replace("//",'').replace("(",'').replace(")","").strip()
except:
pass
if method:
if not doc_dict.has_key(item):
doc_dict[item] = {}
doc = ""
for l in lines[3:]:
doc_line = """ ///"""
if l.count(doc_line):
doc += l.replace(doc_line,'') + '\n'
doc_dict[item][method] = doc
#"\n".join(lines[3:])
for key,value in sorted(doc_dict.items()):
print key
for method,docs in value.items():
print key,'::',method
print docs
pickle.dump(doc_dict,open(dest_path, 'w'),pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
header_path = args[0]
dest_path = None
if len(args) > 1:
dest_path = args[1]
parse_aaf_header(header_path,dest_path)
|
mit
| -8,351,774,476,693,415,000
| 25.681818
| 94
| 0.405877
| false
| 4.667992
| false
| false
| false
|
facebook/mcrouter
|
mcrouter/test/test_latency_injection_route.py
|
1
|
1957
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLatencyInjectionRoute(McrouterTestCase):
config_latency_before = './mcrouter/test/test_latency_injection_before.json'
    config_latency_after = './mcrouter/test/test_latency_injection_after.json'
    config_latency_total = './mcrouter/test/test_latency_injection_total.json'
def setUp(self) -> None:
self.mc = self.add_server(Memcached())
self.mcrouter_latency_before =\
self.add_mcrouter(self.config_latency_before)
self.mcrouter_latency_after =\
self.add_mcrouter(self.config_latency_after)
self.mcrouter_latency_total =\
self.add_mcrouter(self.config_latency_total)
def test_latency_before(self) -> None:
self.mc.set("key1", "value1")
t_start = datetime.now()
self.assertEqual("value1", self.mcrouter_latency_before.get("key1"))
t_end = datetime.now()
duration = t_end - t_start
self.assertGreaterEqual(duration.total_seconds(), 2)
def test_latency_after(self) -> None:
self.mc.set("key2", "value2")
t_start = datetime.now()
self.assertTrue("value2", self.mcrouter_latency_after.get("key2"))
t_end = datetime.now()
duration = t_end - t_start
self.assertGreaterEqual(duration.total_seconds(), 1)
def test_latency_total(self) -> None:
self.mc.set("key3", "value3")
t_start = datetime.now()
self.assertTrue("value3", self.mcrouter_latency_total.get("key3"))
t_end = datetime.now()
duration = t_end - t_start
self.assertGreaterEqual(duration.total_seconds(), 1)
|
mit
| 7,733,254,879,666,440,000
| 33.946429
| 80
| 0.663771
| false
| 3.590826
| true
| false
| false
|
gooddata/openstack-nova
|
nova/conf/vnc.py
|
1
|
9039
|
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import types
vnc_group = cfg.OptGroup(
'vnc',
title='VNC options',
help="""
Virtual Network Computing (VNC) can be used to provide remote desktop
console access to instances for tenants and/or administrators.""")
ALL_OPTS = [
cfg.BoolOpt(
'enabled',
default=True,
deprecated_group='DEFAULT',
deprecated_name='vnc_enabled',
help="""
Enable VNC related features.
Guests will get created with graphical devices to support this. Clients
(for example Horizon) can then establish a VNC connection to the guest.
"""),
cfg.StrOpt(
'keymap',
deprecated_group='DEFAULT',
deprecated_name='vnc_keymap',
deprecated_for_removal=True,
deprecated_since='18.0.0',
deprecated_reason="""
Configuring this option forces QEMU to do keymap conversions. These conversions
are lossy and can result in significant issues for users of non en-US
keyboards. You should instead use a VNC client that supports Extended Key Event
messages, such as noVNC 1.0.0. Refer to bug #1682020 for more information.""",
help="""
Keymap for VNC.
The keyboard mapping (keymap) determines which keyboard layout a VNC
session should use by default.
Possible values:
* A keyboard layout which is supported by the underlying hypervisor on
this node. This is usually an 'IETF language tag' (for example
'en-us'). If you use QEMU as hypervisor, you should find the list
of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
"""),
cfg.HostAddressOpt(
'server_listen',
default='127.0.0.1',
deprecated_opts=[
cfg.DeprecatedOpt('vncserver_listen', group='DEFAULT'),
cfg.DeprecatedOpt('vncserver_listen', group='vnc'),
],
help="""
The IP address or hostname on which an instance should listen for
incoming VNC connection requests on this node.
"""),
cfg.HostAddressOpt(
'server_proxyclient_address',
default='127.0.0.1',
deprecated_opts=[
cfg.DeprecatedOpt('vncserver_proxyclient_address',
group='DEFAULT'),
cfg.DeprecatedOpt('vncserver_proxyclient_address', group='vnc'),
],
help="""
Private, internal IP address or hostname of VNC console proxy.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients.
This option sets the private address to which proxy clients, such as
``nova-xvpvncproxy``, should connect.
"""),
cfg.URIOpt(
'novncproxy_base_url',
default='http://127.0.0.1:6080/vnc_auto.html',
deprecated_group='DEFAULT',
help="""
Public address of noVNC VNC console proxy.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. noVNC provides
VNC support through a websocket-based client.
This option sets the public base URL to which client systems will
connect. noVNC clients can use this address to connect to the noVNC
instance and, by extension, the VNC sessions.
If using noVNC >= 1.0.0, you should use ``vnc_lite.html`` instead of
``vnc_auto.html``.
Related options:
* novncproxy_host
* novncproxy_port
"""),
cfg.HostAddressOpt(
'xvpvncproxy_host',
default='0.0.0.0',
deprecated_group='DEFAULT',
help="""
IP address or hostname that the XVP VNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. Xen provides
the Xenserver VNC Proxy, or XVP, as an alternative to the
websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
XVP clients are Java-based.
This option sets the private address to which the XVP VNC console proxy
service should bind.
Related options:
* xvpvncproxy_port
* xvpvncproxy_base_url
"""),
cfg.PortOpt(
'xvpvncproxy_port',
default=6081,
deprecated_group='DEFAULT',
help="""
Port that the XVP VNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. Xen provides
the Xenserver VNC Proxy, or XVP, as an alternative to the
websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
XVP clients are Java-based.
This option sets the private port to which the XVP VNC console proxy
service should bind.
Related options:
* xvpvncproxy_host
* xvpvncproxy_base_url
"""),
cfg.URIOpt(
'xvpvncproxy_base_url',
default='http://127.0.0.1:6081/console',
deprecated_group='DEFAULT',
help="""
Public URL address of XVP VNC console proxy.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. Xen provides
the Xenserver VNC Proxy, or XVP, as an alternative to the
websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
XVP clients are Java-based.
This option sets the public base URL to which client systems will
connect. XVP clients can use this address to connect to the XVP
instance and, by extension, the VNC sessions.
Related options:
* xvpvncproxy_host
* xvpvncproxy_port
"""),
]
CLI_OPTS = [
cfg.StrOpt(
'novncproxy_host',
default='0.0.0.0',
deprecated_group='DEFAULT',
help="""
IP address that the noVNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. noVNC provides
VNC support through a websocket-based client.
This option sets the private address to which the noVNC console proxy
service should bind.
Related options:
* novncproxy_port
* novncproxy_base_url
"""),
cfg.PortOpt(
'novncproxy_port',
default=6080,
deprecated_group='DEFAULT',
help="""
Port that the noVNC console proxy should bind to.
The VNC proxy is an OpenStack component that enables compute service
users to access their instances through VNC clients. noVNC provides
VNC support through a websocket-based client.
This option sets the private port to which the noVNC console proxy
service should bind.
Related options:
* novncproxy_host
* novncproxy_base_url
"""),
cfg.ListOpt(
'auth_schemes',
item_type=types.String(choices=(
('none', 'Allow connection without authentication'),
('vencrypt', 'Use VeNCrypt authentication scheme'),
)),
default=['none'],
help="""
The authentication schemes to use with the compute node.
Control what RFB authentication schemes are permitted for connections between
the proxy and the compute host. If multiple schemes are enabled, the first
matching scheme will be used, thus the strongest schemes should be listed
first.
Related options:
* ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must also be set
"""),
cfg.StrOpt(
'vencrypt_client_key',
help="""The path to the client certificate PEM file (for x509)
The fully qualified path to a PEM file containing the private key which the VNC
proxy server presents to the compute node during VNC authentication.
Related options:
* ``vnc.auth_schemes``: must include ``vencrypt``
* ``vnc.vencrypt_client_cert``: must also be set
"""),
cfg.StrOpt(
'vencrypt_client_cert',
help="""The path to the client key file (for x509)
The fully qualified path to a PEM file containing the x509 certificate which
the VNC proxy server presents to the compute node during VNC authentication.
Related options:
* ``vnc.auth_schemes``: must include ``vencrypt``
* ``vnc.vencrypt_client_key``: must also be set
"""),
cfg.StrOpt(
'vencrypt_ca_certs',
help="""The path to the CA certificate PEM file
The fully qualified path to a PEM file containing one or more x509 certificates
for the certificate authorities used by the compute node VNC server.
Related options:
* ``vnc.auth_schemes``: must include ``vencrypt``
"""),
]
ALL_OPTS.extend(CLI_OPTS)
def register_opts(conf):
conf.register_group(vnc_group)
conf.register_opts(ALL_OPTS, group=vnc_group)
def register_cli_opts(conf):
conf.register_cli_opts(CLI_OPTS, group=vnc_group)
def list_opts():
return {vnc_group: ALL_OPTS}
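def _example_register_opts():
    """Illustrative sketch: register these options on a standalone ConfigOpts
    instance and read back a default value. Names used here are hypothetical."""
    example_conf = cfg.ConfigOpts()
    register_opts(example_conf)
    example_conf([])  # parse an empty command line so defaults become readable
    return example_conf.vnc.novncproxy_port  # 6080 by default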
|
apache-2.0
| -2,166,768,096,561,270,300
| 29.640678
| 79
| 0.708043
| false
| 3.856229
| false
| false
| false
|
shreyshahi/ghofAtkinson
|
ghofAtkinson/model.py
|
1
|
2759
|
import numpy as np
PERIODS = np.array([0.01, 0.07, 0.09, 0.11, 0.14, 0.18, 0.22, 0.27, 0.34, 0.42, 0.53, 0.65, 0.81, 1.01, 1.25, 1.56, 1.92, 2.44, 3.03, 3.7, 4.55, 5.88, 7.14, 9.09])
c1 = [-0.00219, -0.00236, -0.00244, -0.00245, -0.0024, -0.00235, -0.00235, -0.00233, -0.00231, -0.00224, -0.00213, -0.002, -0.00183, -0.00158, -0.00133, -0.00112, -0.00086, -0.00059, -0.00039, -0.00023, -0.00005, 0, 0, 0]
c2 = [-0.00298, -0.00329, -0.00346, -0.00356, -0.00357, -0.00358, -0.00355, -0.00346, -0.00333, -0.00315, -0.0029, -0.00262, -0.00234, -0.00205, -0.00177, -0.00152, -0.00125, -0.00097, -0.00075, -0.00057, -0.0004, -0.00027, -0.00019, -0.00019]
c3 = [-0.219, -0.046, 0.027, 0.01, -0.082, -0.18, -0.289, -0.386, -0.438, -0.52, -0.606, -0.672, -0.705, -0.69, -0.646, -0.578, -0.518, -0.513, -0.554, -0.574, -0.561, -0.491, -0.462, -0.413]
a = [2.8193, 3.1807, 3.3592, 3.4483, 3.5005, 3.4463, 3.3178, 3.2008, 3.0371, 2.7958, 2.5332, 2.3234, 2.1321, 1.9852, 1.8442, 1.6301, 1.4124, 1.1154, 0.7965, 0.5093, 0.2578, -0.1469, -0.5012, -1.093]
b = [0.1908, 0.1759, 0.17, 0.1669, 0.1604, 0.165, 0.1763, 0.1839, 0.197, 0.2154, 0.2331, 0.2435, 0.2522, 0.2561, 0.2599, 0.273, 0.2851, 0.3015, 0.3197, 0.3361, 0.3497, 0.3835, 0.4119, 0.4641]
Dcascadia = [-0.301, -0.357, -0.357, -0.319, -0.272, -0.237, -0.183, -0.114, -0.046, 0.002, 0.007, 0.011, 0.014, 0.021, 0.089, 0.139, 0.174, 0.129, 0.079, 0.044, 0.013, 0, 0, 0]
PHI = [0.284, 0.313, 0.326, 0.329, 0.324, 0.312, 0.31, 0.312, 0.307, 0.295, 0.276, 0.257, 0.249, 0.249, 0.261, 0.274, 0.285, 0.275, 0.264, 0.252, 0.237, 0.218, 0.201, 0.175]
TAU = [0.196, 0.215, 0.22, 0.218, 0.212, 0.206, 0.202, 0.199, 0.191, 0.171, 0.155, 0.147, 0.131, 0.115, 0.11, 0.113, 0.121, 0.132, 0.137, 0.138, 0.147, 0.151, 0.148, 0.155]
def computeSpectra(mag, r, faba, vs30, cascadia, epistemic, per):
    # faba is used as a 0/1 flag: F and B select which distance coefficient
    # (c1 or c2) applies to this record.
    F = 1 - faba
    B = faba
    # Coefficients are tabulated per period, so `per` must exactly match one
    # of the PERIODS entries.
    pIdx = np.nonzero(PERIODS == per)[0][0]
    c0 = a[pIdx] + b[pIdx] * mag
    # Effective distance with a fixed 60 km pseudo-depth term.
    reff = np.sqrt(r**2 + 60**2)
    logSa = c0 - np.log10(reff) + c1[pIdx] * F * r + c2[pIdx] * B * r + c3[pIdx] * np.log10(vs30/760)
    # Optional regional (Cascadia) adjustment.
    if cascadia == 1:
        logSa += Dcascadia[pIdx]
    # Optional epistemic adjustment, capped at +/-0.35 log10 units.
    if epistemic == 1:
        correction = np.min([0.15 + 0.0007*r, 0.35])
        logSa += correction
    if epistemic == -1:
        correction = -1 * np.min([0.15 + 0.0007*r, 0.35])
        logSa += correction
    return logSa
def interEventSigma(periods):
tau = [np.interp(np.log10(per) , np.log10(PERIODS) , TAU) for per in periods]
return tau
def intraEventSigma(periods):
phi = [np.interp(np.log10(per) , np.log10(PERIODS) , PHI) for per in periods]
return phi
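# Illustrative usage sketch (not part of the original module). The input
# values are made up for demonstration; `per` must be one of the tabulated
# PERIODS because computeSpectra matches it exactly.
if __name__ == '__main__':
    log_sa = computeSpectra(mag=7.0, r=100.0, faba=0, vs30=400.0,
                            cascadia=0, epistemic=0, per=1.01)
    print('log10(Sa) at T=1.01 s:', log_sa)
    print('inter-event sigma:', interEventSigma([0.2, 1.0]))
    print('intra-event sigma:', intraEventSigma([0.2, 1.0]))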
|
mit
| 1,697,952,010,962,822,000
| 58.978261
| 243
| 0.538238
| false
| 1.841789
| false
| false
| false
|
rancher/cattle
|
tests/integration/cattletest/core/test_volume.py
|
1
|
14377
|
from random import choice
from string import hexdigits
from common_fixtures import * # NOQA
from gdapi import ApiError
from gdapi import ClientApiError
VOLUME_CLEANUP_LABEL = 'io.rancher.container.volume_cleanup_strategy'
def test_volume_cant_delete_active(client, context):
c = client.create_container(imageUuid=context.image_uuid)
c = client.wait_success(c)
assert c.state == 'running'
volume = c.volumes()[0]
assert volume.state == 'active'
# Assert an active volume cannot be deleted
with pytest.raises(ApiError) as e:
client.delete(volume)
assert e.value.error.status == 405
def test_volume_create_state(client, context):
name = random_str()
c = client.create_volume(name=name, driver='local')
c = client.wait_success(c)
assert c.state == 'inactive'
assert c.uri == 'local:///%s' % name
volume = client.wait_success(client.delete(c))
assert volume.removed is not None
def test_volume_create_size_validation(client, context):
with pytest.raises(ApiError) as e:
client.create_volume(name='foo', driver='foo', sizeMb=111)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
def test_volume_create_without_driver_name(client, context):
name = random_str()
with pytest.raises(ApiError) as e:
client.create_volume(name=name)
assert e.value.error.status == 422
assert e.value.error.code == 'MissingRequired'
def test_volume_create_with_opts(client, context):
name = random_str()
c = client.create_volume(name=name,
driver='local',
driverOpts={'size': '1G'})
c = client.wait_success(c)
assert c.state == 'inactive'
assert c.uri == 'local:///%s' % name
volume = client.wait_success(client.delete(c))
assert volume.removed is not None
def test_create_container_with_volume(new_context, super_client):
client = new_context.client
name1 = random_str()
v1 = client.create_volume(name=name1, driver='local')
v1 = client.wait_success(v1)
assert v1.state == 'inactive'
name2 = random_str()
v2 = client.create_volume(name=name2, driver='local')
v2 = client.wait_success(v2)
assert v2.state == 'inactive'
dataVolumeMounts = {'/var/lib/docker/mntpt1': v1.id,
'/var/lib/docker/mntpt2': v2.id}
dataVolumes = {'/home': '/home'}
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumes=dataVolumes,
dataVolumeMounts=dataVolumeMounts)
c = client.wait_success(c, timeout=240)
assert c.state == 'running'
dataVol1 = '%s:/var/lib/docker/mntpt1' % name1
dataVol2 = '%s:/var/lib/docker/mntpt2' % name2
dataVol1Found = False
dataVol2Found = False
for dataVol in c.dataVolumes:
if dataVol == dataVol1:
dataVol1Found = True
if dataVol == dataVol2:
dataVol2Found = True
assert dataVol1Found and dataVol2Found
# Mounting happens in docker specific code; need to simulate
create_mount(v1, c, client, super_client)
create_mount(v2, c, client, super_client)
v1 = client.wait_success(v1)
v2 = client.wait_success(v2)
assert v1.state == 'active'
assert v2.state == 'active'
# Assert an active volume cannot be deleted
with pytest.raises(ApiError) as e:
client.delete(v1)
assert e.value.error.status == 405
assert len(c.volumes()) == 1
assert c.volumes()[0].id not in [v1.id, v2.id]
vsp1 = super_client.list_volumeStoragePoolMap(volumeId=v1.id)
vsp2 = super_client.list_volumeStoragePoolMap(volumeId=v2.id)
assert vsp1 is not None and len(vsp1) == 1
assert vsp2 is not None and len(vsp2) == 1
spid1 = vsp1[0].storagePoolId
spid2 = vsp2[0].storagePoolId
host1 = super_client.list_storagePoolHostMap(storagePoolId=spid1)
host2 = super_client.list_storagePoolHostMap(storagePoolId=spid2)
assert host1[0].id == host2[0].id
new_host = register_simulated_host(new_context)
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumes=dataVolumes,
requestedHostId=new_host.id,
dataVolumeMounts=dataVolumeMounts)
client.wait_success(c)
assert 'must have exactly these pool(s)' in e.value.message
def create_resources(context, client, super_client, labels={}):
vol = client.create_volume(name=random_str(), driver='local')
unnamed_vol = client.create_volume(name=random_vol_name(), driver='local')
data_volume_mounts = {'/con/path': vol.id, '/path2': unnamed_vol.id}
c = client.create_container(imageUuid=context.image_uuid,
dataVolumeMounts=data_volume_mounts,
labels=labels)
c = client.wait_success(c)
# Simulate volume mount (only happens with real docker)
create_mount(vol, c, client, super_client)
create_mount(unnamed_vol, c, client, super_client)
return c, vol, unnamed_vol
def test_instance_volume_cleanup_strategy(new_context, super_client):
client = new_context.client
# Assert default strategy to delete unnamed volumes only
c, vol, unnamed_vol = create_resources(new_context, client, super_client)
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.state == 'detached')
wait_for_condition(client, unnamed_vol, lambda x: x.removed is not None)
# Assert explicit 'unnamed' strategy
c, vol, unnamed_vol = create_resources(
new_context, client, super_client, labels={
VOLUME_CLEANUP_LABEL: 'unnamed'})
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.state == 'detached')
wait_for_condition(client, unnamed_vol, lambda x: x.removed is not None)
# Assert 'none' strategy
c, vol, unnamed_vol = create_resources(
new_context, client, super_client, labels={
VOLUME_CLEANUP_LABEL: 'none'})
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.state == 'detached')
wait_for_condition(client, unnamed_vol, lambda x: x.state == 'detached')
# Assert 'all' strategy
c, vol, unnamed_vol = create_resources(
new_context, client, super_client, labels={
VOLUME_CLEANUP_LABEL: 'all'})
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.removed is not None)
wait_for_condition(client, unnamed_vol, lambda x: x.removed is not None)
# Assert invalid value for label is rejected
with pytest.raises(ApiError):
create_resources(
new_context, client, super_client,
labels={VOLUME_CLEANUP_LABEL: 'foo'})
def create_container_and_mount(client, data_volume_mounts, new_context,
super_client, vols):
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts,
labels={VOLUME_CLEANUP_LABEL: 'all'})
c = client.wait_success(c)
for vol in vols:
c, m = create_mount(vol, c, client, super_client)
return c
def purge_instance_and_check_volume_state(c, vols, client, state=None):
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
def check(obj):
if state is not None:
return obj.state == state
else:
return obj.removed is not None
def report(obj):
if state is not None:
return 'State: %s. Expected: %s' % (obj.state, state)
else:
return 'Removed is None'
for vol in vols:
wait_for_condition(client, vol,
lambda x: check(x),
lambda x: report(x))
def create_volume_and_dvm(client, count):
dvms = {}
vols = []
for i in range(0, count):
v1 = client.create_volume(name=random_str(), driver='local')
dvms['/con/path%s' % i] = v1.id
vols.append(v1)
return dvms, vols
def test_volume_remove_on_purge(new_context, super_client):
client = new_context.client
# Simple case: volume associated with one container that is purged
# volume gets removed
dvms, vols = create_volume_and_dvm(client, 2)
c = create_container_and_mount(client, dvms, new_context,
super_client, vols)
purge_instance_and_check_volume_state(c, vols, client)
# Vol associated with multiple containers
dvms, vols = create_volume_and_dvm(client, 2)
c = create_container_and_mount(client, dvms, new_context,
super_client, vols)
c2 = create_container_and_mount(client, dvms, new_context,
super_client, vols)
purge_instance_and_check_volume_state(c, vols, client, state='active')
purge_instance_and_check_volume_state(c2, vols, client)
def test_volume_mounting_and_delete(new_context, super_client):
client = new_context.client
v1 = client.create_volume(name=random_str(), driver='local')
data_volume_mounts = {'/con/path': v1.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
v1 = client.wait_success(v1)
assert len(v1.storagePools()) == 1
# Creating a mount that associates the volume to the container
# only happens when integrating with real docker, so we'll simulate it
c, m = create_mount(v1, c, client, super_client)
# Assert that creating the mount results in activating volume
check_mount_count(client, c, 1)
assert m.state == 'active'
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
# Assert that a volume with mounts cannot be deactivated, removed or purged
assert 'deactivate' not in v1.actions and 'remove' not in v1.actions \
and 'purge' not in v1.actions
    # Assert that once the container is removed, the mounts are removed and
    # the volume is deactivated
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
v1 = wait_for_condition(client, v1, lambda x: x.state == 'detached')
check_mount_count(client, c, 0)
# Mount to new container to assert volume goes back to active
c2 = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c2 = client.wait_success(c2)
c2, m2 = create_mount(v1, c2, client, super_client)
check_mount_count(client, c2, 1)
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
# Make the volume be mounted to two containers
c3 = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts,
labels={VOLUME_CLEANUP_LABEL: 'all'})
c3 = client.wait_success(c3)
c3, m3 = create_mount(v1, c3, client, super_client)
check_mount_count(client, c3, 1)
check_mount_count(client, v1, 2)
    # Remove one of the containers and assert that actions are still blocked
c2 = client.wait_success(c2.stop())
c2 = client.wait_success(c2.remove())
check_mount_count(client, c2, 0)
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
v1 = client.wait_success(v1)
check_mount_count(client, v1, 1)
assert 'deactivate' not in v1.actions and 'remove' not in v1.actions \
and 'purge' not in v1.actions
# Remove remaining container and assert the volume can be removed
c3 = client.wait_success(c3.stop())
c3 = client.wait_success(c3.remove())
check_mount_count(client, c3, 0)
wait_for_condition(client, v1, lambda x: x.removed is not None)
def test_volume_storage_pool_purge(new_context, super_client):
client = new_context.client
vol_name = random_str()
v1 = client.create_volume(name=vol_name, driver='local')
data_volume_mounts = {'/con/path': v1.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
c, m = create_mount(v1, c, client, super_client)
check_mount_count(client, c, 1)
assert m.state == 'active'
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
sp = v1.storagePools()[0]
host = c.hosts()[0]
host = client.wait_success(host.deactivate())
host = client.wait_success(host.remove())
client.wait_success(host.purge())
wait_for_condition(client, sp, lambda x: x.removed is not None)
wait_for_condition(client, v1, lambda x: x.removed is not None)
register_simulated_host(new_context)
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=['%s:/foo' % vol_name])
c = client.wait_success(c)
assert c.state == 'running'
def create_mount(volume, container, client, super_client):
mount = super_client.create_mount(volumeId=volume.id,
instanceId=container.id,
accountId=container.accountId)
mount = super_client.wait_success(mount)
return client.reload(container), mount
def check_mount_count(client, resource, count):
wait_for_condition(client, resource, lambda x: len(
[i for i in resource.mounts_link() if i.state != 'inactive']) == count)
def random_vol_name():
# Emulates the random name that docker would assign to an unnamed volume
return ''.join(choice(hexdigits) for i in range(64))
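# Illustrative note (not part of the original tests): VOLUME_CLEANUP_LABEL is
# the knob these tests exercise, e.g.
#     client.create_container(imageUuid=image_uuid,
#                             labels={VOLUME_CLEANUP_LABEL: 'unnamed'})
# with 'unnamed' (the default behaviour), 'none' and 'all' as the accepted
# values covered above; anything else is rejected with an ApiError.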
|
apache-2.0
| 3,075,792,182,796,224,500
| 36.054124
| 79
| 0.634555
| false
| 3.491258
| true
| false
| false
|
robmcmullen/peppy
|
peppy/major_modes/flagship.py
|
1
|
1808
|
# peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""FlagShip programming language editing support.
Major mode for editing FlagShip files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode
class FlagShipMode(FundamentalMode):
"""Stub major mode for editing FlagShip files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
keyword = 'FlagShip'
editra_synonym = 'FlagShip'
stc_lexer_id = wx.stc.STC_LEX_FLAGSHIP
start_line_comment = u'//'
end_line_comment = ''
icon = 'icons/page_white.png'
default_classprefs = (
StrParam('extensions', 'prg', fullwidth=True),
StrParam('keyword_set_0', unique_keywords[128], hidden=False, fullwidth=True),
StrParam('keyword_set_1', unique_keywords[129], hidden=False, fullwidth=True),
StrParam('keyword_set_2', unique_keywords[130], hidden=False, fullwidth=True),
StrParam('keyword_set_3', unique_keywords[131], hidden=False, fullwidth=True),
)
class FlagShipModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for FlagShip
"""
def getMajorModes(self):
yield FlagShipMode
|
gpl-2.0
| -6,349,689,533,123,834,000
| 33.113208
| 86
| 0.722898
| false
| 3.637827
| false
| false
| false
|
springload/wagtailmodeladmin
|
wagtailmodeladmin/views.py
|
1
|
30988
|
import sys
import operator
from collections import OrderedDict
from functools import reduce
from django.db import models
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.constants import LOOKUP_SEP
from django.db.models.sql.constants import QUERY_TERMS
from django.shortcuts import get_object_or_404, redirect, render
from django.core.urlresolvers import reverse
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import Paginator, InvalidPage
from django.contrib.admin import FieldListFilter, widgets
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote)
from django.utils import six
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.http import urlencode
from django.utils.functional import cached_property
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from .permission_helpers import PermissionHelper, PagePermissionHelper
from .utils import get_url_name, ActionButtonHelper, permission_denied
from .forms import ParentChooserForm
# IndexView settings
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR)
class WMABaseView(TemplateView):
"""
Groups together common functionality for all app views.
"""
model_admin = None
meta_title = ''
page_title = ''
page_subtitle = ''
def __init__(self, model_admin):
self.model_admin = model_admin
self.model = model_admin.model
self.opts = model_admin.model._meta
self.pk_attname = self.opts.pk.attname
self.is_pagemodel = model_admin.is_pagemodel
if self.is_pagemodel:
self.permission_helper = PagePermissionHelper(self.model)
else:
self.permission_helper = PermissionHelper(self.model)
@cached_property
def app_label(self):
return capfirst(force_text(self.opts.app_label))
@cached_property
def model_name(self):
return capfirst(force_text(self.opts.verbose_name))
@cached_property
def model_name_plural(self):
return capfirst(force_text(self.opts.verbose_name_plural))
@cached_property
def get_index_url(self):
return self.model_admin.get_index_url()
@cached_property
def get_create_url(self):
return self.model_admin.get_create_url()
@cached_property
def menu_icon(self):
return self.model_admin.get_menu_icon()
@cached_property
def header_icon(self):
return self.menu_icon
def get_edit_url(self, obj):
return reverse(get_url_name(self.opts, 'edit'), args=(obj.pk,))
def get_delete_url(self, obj):
return reverse(get_url_name(self.opts, 'delete'), args=(obj.pk,))
def prime_session_for_redirection(self):
self.request.session['return_to_index_url'] = self.get_index_url
def get_page_title(self):
return self.page_title or self.model_name_plural
def get_meta_title(self):
return self.meta_title or self.get_page_title()
def get_base_queryset(self, request):
return self.model_admin.get_queryset(request)
class WMAFormView(WMABaseView, FormView):
def get_edit_handler_class(self):
panels = extract_panel_definitions_from_model_class(self.model)
return ObjectList(panels).bind_to_model(self.model)
def get_form_class(self):
return self.get_edit_handler_class().get_form_class(self.model)
def get_success_url(self):
return self.get_index_url
def get_instance(self):
return getattr(self, 'instance', None) or self.model()
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs.update({'instance': self.get_instance()})
return kwargs
def get_context_data(self, **kwargs):
instance = self.get_instance()
edit_handler_class = self.get_edit_handler_class()
form = self.get_form()
return {
'view': self,
'edit_handler': edit_handler_class(instance=instance, form=form)
}
def get_success_message(self, instance):
return _("{model_name} '{instance}' created.").format(
model_name=self.model_name, instance=instance),
def get_success_message_buttons(self, instance):
return [
messages.button(self.get_edit_url(instance), _('Edit'))
]
def get_error_message(self):
model_name = self.model_name.lower()
return _("The %s could not be created due to errors.") % model_name
def form_valid(self, form):
instance = form.save()
messages.success(
self.request, self.get_success_message(instance),
buttons=self.get_success_message_buttons(instance)
)
return redirect(self.get_success_url())
def form_invalid(self, form):
messages.error(self.request, self.get_error_message())
return self.render_to_response(self.get_context_data())
class ObjectSpecificView(WMABaseView):
object_id = None
instance = None
def __init__(self, model_admin, object_id):
super(ObjectSpecificView, self).__init__(model_admin)
self.object_id = object_id
self.pk_safe = quote(object_id)
filter_kwargs = {}
filter_kwargs[self.pk_attname] = self.pk_safe
object_qs = model_admin.model._default_manager.get_queryset().filter(
**filter_kwargs)
self.instance = get_object_or_404(object_qs)
def check_action_permitted(self):
return True
def get_edit_url(self, obj=None):
return reverse(get_url_name(self.opts, 'edit'), args=(self.pk_safe,))
def get_delete_url(self, obj=None):
return reverse(get_url_name(self.opts, 'delete'), args=(self.pk_safe,))
class IndexView(WMABaseView):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.list_display = self.model_admin.get_list_display(request)
self.list_filter = self.model_admin.get_list_filter(request)
self.search_fields = self.model_admin.get_search_fields(request)
self.items_per_page = self.model_admin.list_per_page
self.select_related = self.model_admin.list_select_related
request = self.request
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
if not self.permission_helper.allow_list_view(request.user):
return permission_denied(request)
return super(IndexView, self).dispatch(request, *args, **kwargs)
def get_action_buttons_for_obj(self, user, obj):
bh = ActionButtonHelper(self.model, self.permission_helper, user, obj)
return bh.get_permitted_buttons()
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
if self.search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
def lookup_allowed(self, lookup, value):
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in self.model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field, _, _, _ = self.model._meta.get_field_by_name(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
if field.rel is None:
# This property or relation doesn't exist, but it's allowed
# since it's ignored in ChangeList.get_filters().
return True
model = field.rel.to
rel_name = field.rel.get_related_field().name
elif isinstance(field, ForeignObjectRel):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise DisallowedModelAdminLookup(
"Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(
request,
lookup_params,
self.model,
self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given
# field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field = list_filter
field_list_filter_class = FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model,
field_path)[-1]
spec = field_list_filter_class(
field,
request,
lookup_params,
self.model,
self.model_admin,
field_path=field_path)
# Check if we need to use distinct()
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts, key))
return (
filter_specs, bool(filter_specs), lookup_params, use_distinct
)
except FieldDoesNotExist as e:
six.reraise(
IncorrectLookupParameters,
IncorrectLookupParameters(e),
sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
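    # Illustrative behaviour note (not part of the original module): with
    # self.params == {'o': '1', 'q': 'foo'}, get_query_string({'p': 2},
    # remove=['q']) returns '?o=1&p=2' -- removals match by prefix and are
    # applied before the new parameters are merged in.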
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
def get_default_ordering(self, request):
if self.model_admin.get_ordering(request):
return self.model_admin.get_ordering(request)
if self.opts.ordering:
return self.opts.ordering
return ()
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.get_default_ordering(request))
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying
# sort field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on model_admin or model Meta, we don't
# know the right column numbers absolutely, because there might be
            # more than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.get_base_queryset(request)
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.select_related is True:
return qs.select_related()
if self.select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.select_related:
return qs.select_related(*self.select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field, models.ManyToOneRel):
return True
return False
def get_context_data(self, request, *args, **kwargs):
user = request.user
has_add_permission = self.permission_helper.has_add_permission(user)
all_count = self.get_base_queryset(request).count()
queryset = self.get_queryset(request)
result_count = queryset.count()
paginator = Paginator(queryset, self.items_per_page)
try:
page_obj = paginator.page(self.page_num + 1)
except InvalidPage:
page_obj = paginator.page(1)
context = {
'view': self,
'all_count': all_count,
'result_count': result_count,
'paginator': paginator,
'page_obj': page_obj,
'object_list': page_obj.object_list,
'has_add_permission': has_add_permission,
}
if self.is_pagemodel:
allowed_parent_types = self.model.allowed_parent_page_types()
user = request.user
valid_parents = self.permission_helper.get_valid_parent_pages(user)
valid_parent_count = valid_parents.count()
context.update({
'no_valid_parents': not valid_parent_count,
'required_parent_types': allowed_parent_types,
})
return context
def get(self, request, *args, **kwargs):
context = self.get_context_data(request, *args, **kwargs)
if request.session.get('return_to_index_url'):
del(request.session['return_to_index_url'])
return self.render_to_response(context)
def get_template_names(self):
return self.model_admin.get_index_template()
class CreateView(WMAFormView):
page_title = _('New')
def dispatch(self, request, *args, **kwargs):
if not self.permission_helper.has_add_permission(request.user):
return permission_denied(request)
if self.is_pagemodel:
user = request.user
parents = self.permission_helper.get_valid_parent_pages(user)
parent_count = parents.count()
            # There's only one valid parent for this page type and this user,
            # so send them straight to the page creation view with that
            # parent pre-chosen
if parent_count == 1:
parent = parents.get()
return redirect(
'wagtailadmin_pages_create', self.opts.app_label,
self.opts.model_name, parent.pk)
# The page can be added in multiple places, so redirect to the
# choose_parent_page view so that the parent can be specified
return redirect(self.model_admin.get_choose_parent_page_url())
return super(CreateView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Create new %s') % self.model_name.lower()
def get_page_subtitle(self):
return self.model_name
def get_template_names(self):
return self.model_admin.get_create_template()
class ChooseParentPageView(WMABaseView):
def dispatch(self, request, *args, **kwargs):
if not self.permission_helper.has_add_permission(request.user):
return permission_denied(request)
return super(ChooseParentPageView, self).dispatch(request, *args,
**kwargs)
def get_page_title(self):
return _('Add %s') % self.model_name
def get_form(self, request):
parents = self.permission_helper.get_valid_parent_pages(request.user)
return ParentChooserForm(parents, request.POST or None)
def get(self, request, *args, **kwargs):
form = self.get_form(request)
if form.is_valid():
parent = form.cleaned_data['parent_page']
return redirect('wagtailadmin_pages_create', self.opts.app_label,
self.opts.model_name, quote(parent.pk))
context = {'view': self, 'form': form}
return render(request, self.get_template(), context)
def get_template(self):
return self.model_admin.get_choose_parent_page_template()
class EditView(ObjectSpecificView, CreateView):
page_title = _('Editing')
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_edit_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
if self.is_pagemodel:
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_edit', self.object_id)
return super(CreateView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Editing %s') % self.model_name.lower()
def page_subtitle(self):
return self.instance
def get_success_message(self, instance):
return _("{model_name} '{instance}' updated.").format(
model_name=self.model_name, instance=instance)
def get_error_message(self):
model_name = self.model_name.lower()
return _("The %s could not be saved due to errors.") % model_name
def get_template_names(self):
return self.model_admin.get_edit_template()
class DeleteView(ObjectSpecificView):
page_title = _('Delete')
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_delete_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
if self.is_pagemodel:
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_delete', self.object_id)
return super(DeleteView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Confirm deletion of %s') % self.model_name.lower()
def get_page_subtitle(self):
return self.instance
def confirmation_message(self):
return _(
"Are you sure you want to delete this %s? If other things in your "
"site are related to it, they may also be effected."
) % self.model_name
def get(self, request, *args, **kwargs):
instance = self.instance
if request.POST:
instance.delete()
messages.success(
request,
_("{model_name} '{instance}' deleted.").format(
model_name=self.model_name, instance=instance))
return redirect(self.get_index_url)
context = {'view': self, 'instance': self.instance}
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_template_names(self):
return self.model_admin.get_delete_template()
class UnpublishRedirectView(ObjectSpecificView):
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_unpublish_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_unpublish', self.object_id)
class CopyRedirectView(ObjectSpecificView):
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_copy_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_copy', self.object_id)
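# Illustrative wiring sketch (not part of this module, and only an assumption
# about how the surrounding app uses it): a ModelAdmin-style class is expected
# to instantiate these views per request, roughly like
#     view = IndexView(model_admin=self)
#     view.request = request
#     return view.dispatch(request)
# and similarly EditView(model_admin=self, object_id=object_id) for edits.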
|
mit
| 5,287,674,027,923,612,000
| 37.209618
| 79
| 0.595134
| false
| 4.258348
| false
| false
| false
|
iamahuman/angr
|
angr/exploration_techniques/loop_seer.py
|
1
|
6356
|
import logging
from . import ExplorationTechnique
from ..knowledge_base import KnowledgeBase
from ..knowledge_plugins.functions import Function
l = logging.getLogger(name=__name__)
class LoopSeer(ExplorationTechnique):
"""
This exploration technique monitors exploration and maintains all
loop-related data (well, currently it is just the loop trip counts, but feel
free to add something else).
"""
def __init__(self, cfg=None, functions=None, loops=None, use_header=False, bound=None, bound_reached=None, discard_stash='spinning'):
"""
:param cfg: Normalized CFG is required.
:param functions: Function(s) containing the loop(s) to be analyzed.
:param loops: Loop(s) to be analyzed.
:param use_header: Whether to use header based trip counter to compare with the bound limit.
:param bound: Limit the number of iteration a loop may be executed.
:param bound_reached: If provided, should be a function that takes a SimulationManager and returns
a SimulationManager. Will be called when loop execution reach the given bound.
Default to moving states that exceed the loop limit to a discard stash.
:param discard_stash: Name of the stash containing states exceeding the loop limit.
"""
super(LoopSeer, self).__init__()
self.cfg = cfg
self.functions = functions
self.bound = bound
self.bound_reached = bound_reached
self.discard_stash = discard_stash
self.use_header = use_header
self.loops = {}
if type(loops) is Loop:
loops = [loops]
if type(loops) in (list, tuple) and all(type(l) is Loop for l in loops):
for loop in loops:
if loop.entry_edges:
self.loops[loop.entry_edges[0][0].addr] = loop
elif loops is not None:
raise TypeError("Invalid type for 'loops' parameter!")
def setup(self, simgr):
if self.cfg is None:
cfg_kb = KnowledgeBase(self.project)
self.cfg = self.project.analyses.CFGFast(kb=cfg_kb, normalize=True)
elif not self.cfg.normalized:
l.warning("LoopSeer must use a normalized CFG. Normalizing the provided CFG...")
self.cfg.normalize()
funcs = None
if type(self.functions) in (str, int, Function):
funcs = [self._get_function(self.functions)]
elif type(self.functions) in (list, tuple) and all(type(f) in (str, int, Function) for f in self.functions):
funcs = []
for f in self.functions:
func = self._get_function(f)
if func is not None:
funcs.append(func)
funcs = None if not funcs else funcs
elif self.functions is not None:
raise TypeError("Invalid type for 'functions' parameter!")
if not self.loops:
loop_finder = self.project.analyses.LoopFinder(kb=self.cfg.kb, normalize=True, functions=funcs)
for loop in loop_finder.loops:
if loop.entry_edges:
entry = loop.entry_edges[0][0]
self.loops[entry.addr] = loop
def step(self, simgr, stash='active', **kwargs):
for state in simgr.stashes[stash]:
# Processing a currently running loop
if state.loop_data.current_loop:
loop = state.loop_data.current_loop[-1][0]
header = loop.entry.addr
if state.addr == header:
continue_addrs = [e[0].addr for e in loop.continue_edges]
if state.history.addr in continue_addrs:
state.loop_data.back_edge_trip_counts[state.addr][-1] += 1
state.loop_data.header_trip_counts[state.addr][-1] += 1
elif state.addr in state.loop_data.current_loop[-1][1]:
state.loop_data.current_loop.pop()
if self.bound is not None:
counts = state.loop_data.back_edge_trip_counts[header][-1] if not self.use_header else \
state.loop_data.header_trip_counts[header][-1]
if counts > self.bound:
if self.bound_reached is not None:
simgr = self.bound_reached(simgr)
else:
simgr.stashes[stash].remove(state)
simgr.stashes[self.discard_stash].append(state)
l.debug("%s back edge based trip counts %s", state, state.loop_data.back_edge_trip_counts)
l.debug("%s header based trip counts %s", state, state.loop_data.header_trip_counts)
            # Loop entry detected. This test is put here because, in the case
            # of nested loops, we want to handle the outer loop before
            # proceeding to the inner loop.
if state.addr in self.loops:
loop = self.loops[state.addr]
header = loop.entry.addr
exits = [e[1].addr for e in loop.break_edges]
state.loop_data.back_edge_trip_counts[header].append(0)
state.loop_data.header_trip_counts[header].append(0)
state.loop_data.current_loop.append((loop, exits))
simgr.step(stash=stash, **kwargs)
return simgr
def successors(self, simgr, state, **kwargs):
node = self.cfg.get_any_node(state.addr)
if node is not None:
kwargs['num_inst'] = min(kwargs.get('num_inst', float('inf')), len(node.instruction_addrs))
return simgr.successors(state, **kwargs)
def _get_function(self, func):
f = None
if type(func) is str:
f = self.cfg.kb.functions.function(name=func)
if f is None:
l.warning("Function '%s' doesn't exist in the CFG. Skipping...", func)
elif type(func) is int:
f = self.cfg.kb.functions.function(addr=func)
if f is None:
l.warning("Function at 0x%x doesn't exist in the CFG. Skipping...", func)
elif type(func) is Function:
f = func
return f
from ..analyses.loopfinder import Loop
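# Illustrative usage sketch (not part of the original module); assumes an
# angr Project `proj` created elsewhere:
#     simgr = proj.factory.simulation_manager(proj.factory.entry_state())
#     simgr.use_technique(LoopSeer(bound=5))
#     simgr.run()
# States whose tracked loop exceeds the bound end up in the 'spinning' stash
# (the default discard_stash) unless a bound_reached callback is supplied.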
|
bsd-2-clause
| -423,667,648,311,579,200
| 41.373333
| 137
| 0.575519
| false
| 4.146119
| false
| false
| false
|
emidln/django_roa
|
django_roa/db/exceptions.py
|
1
|
2114
|
from django.conf import settings
from django.utils.html import strip_tags
from django.utils.text import unescape_entities
from django.utils.encoding import force_unicode
ROA_DJANGO_ERRORS = getattr(settings, 'ROA_DJANGO_ERRORS', False)
class ROAException(Exception):
def __init__(self, exception):
if ROA_DJANGO_ERRORS and 'message' in exception \
and 'status_code' in exception:
self.msg = force_unicode(exception.message)
self.status_code = exception.status_code
else:
self.msg = force_unicode(exception)
self.status_code = 'XXX'
def __str__(self):
if ROA_DJANGO_ERRORS and '<body>' in self.msg:
return self.parse_django_error()
return self.msg
def parse_django_error(self):
"""Extract the summary part of a Django HTML error."""
try:
summary = self.msg.split(u'<body>\n<div id="summary">\n ', 1)[1]\
.split(u'<th>Python Executable:</th>', 1)[0]
traceback = self.msg.split(u'\n\nTraceback:', 1)[1]\
.split(u'</textarea>', 1)[0]
except IndexError:
return self.msg
result = []
title = None
for line in strip_tags(summary).split('\n'):
line_content = unescape_entities(line.strip())
if line_content:
if line_content.endswith(':'):
title = line_content
elif title is None:
title = "%s:" % line_content
else:
result.append("%s %s\n" % (title, line_content))
result.append("Status code: %s" % self.status_code)
indent, indent2 = u' ', u' '
return u"%(summary)s %(traceback)s".strip() % {
'summary': indent.join(force_unicode(line) for line in result),
'traceback': indent2.join(force_unicode(line+"\n") \
for line in traceback.split('\n')),
}
class ROANotImplementedYetException(Exception):
pass
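# Illustrative settings note (not part of the original module): the Django
# error parsing above only kicks in when the consuming project sets
#     ROA_DJANGO_ERRORS = True
# in its settings; otherwise __str__ simply returns the raw message.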
|
bsd-3-clause
| 5,526,580,686,477,947,000
| 38.886792
| 78
| 0.544465
| false
| 4.136986
| false
| false
| false
|
hunter-87/binocular-dense-stereo
|
StereoVision-master/stereovision/ui_utils.py
|
1
|
9561
|
# Copyright (C) 2014 Daniel Lee <lee.daniel.1986@gmail.com>
#
# This file is part of StereoVision.
#
# StereoVision is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# StereoVision is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StereoVision. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities for easing user interaction with the ``stereovision`` package.
Variables:
* ``CHESSBOARD_ARGUMENTS`` - ``argparse.ArgumentParser`` for working with
chessboards
* ``STEREO_BM_FLAG`` - ``argparse.ArgumentParser`` for using StereoBM
Functions:
* ``find_files`` - Discover stereo images in directory
* ``calibrate_folder`` - Calibrate chessboard images discovered in a folder
Classes:
* ``BMTuner`` - Tune block matching algorithm to camera pair
.. image:: classes_ui_utils.svg
"""
from argparse import ArgumentParser
from functools import partial
import os
import cv2
from progressbar import ProgressBar, Percentage, Bar
from stereovision.calibration import StereoCalibrator
from stereovision.exceptions import BadBlockMatcherArgumentError
#: Command line arguments for collecting information about chessboards
CHESSBOARD_ARGUMENTS = ArgumentParser(add_help=False)
CHESSBOARD_ARGUMENTS.add_argument("--rows", type=int,
help="Number of inside corners in the "
"chessboard's rows.", default=9)
CHESSBOARD_ARGUMENTS.add_argument("--columns", type=int,
help="Number of inside corners in the "
"chessboard's columns.", default=6)
CHESSBOARD_ARGUMENTS.add_argument("--square-size", help="Size of chessboard "
"squares in cm.", type=float, default=1.8)
#: Command line arguments for using StereoBM rather than StereoSGBM
STEREO_BM_FLAG = ArgumentParser(add_help=False)
STEREO_BM_FLAG.add_argument("--use_stereobm", help="Use StereoBM rather than "
"StereoSGBM block matcher.", action="store_true")
def find_files(folder):
"""Discover stereo photos and return them as a pairwise sorted list."""
files = [i for i in os.listdir(folder) if i.startswith("left")]
files.sort()
for i in range(len(files)):
insert_string = "right{}".format(files[i * 2][4:])
files.insert(i * 2 + 1, insert_string)
files = [os.path.join(folder, filename) for filename in files]
return files
def calibrate_folder(args):
"""
Calibrate camera based on chessboard images, write results to output folder.
All images are read from disk. Chessboard points are found and used to
calibrate the stereo pair. Finally, the calibration is written to the folder
specified in ``args``.
``args`` needs to contain the following fields:
input_files: List of paths to input files
rows: Number of rows in chessboard
columns: Number of columns in chessboard
square_size: Size of chessboard squares in cm
output_folder: Folder to write calibration to
"""
height, width = cv2.imread(args.input_files[0]).shape[:2]
calibrator = StereoCalibrator(args.rows, args.columns, args.square_size,
(width, height))
progress = ProgressBar(maxval=len(args.input_files),
widgets=[Bar("=", "[", "]"),
" ", Percentage()])
print("Reading input files...")
while args.input_files:
left, right = args.input_files[:2]
img_left, im_right = cv2.imread(left), cv2.imread(right)
calibrator.add_corners((img_left, im_right),
show_results=args.show_chessboards)
args.input_files = args.input_files[2:]
progress.update(progress.maxval - len(args.input_files))
progress.finish()
print("Calibrating cameras. This can take a while.")
calibration = calibrator.calibrate_cameras()
avg_error = calibrator.check_calibration(calibration)
print("The average error between chessboard points and their epipolar "
"lines is \n"
"{} pixels. This should be as small as possible.".format(avg_error))
calibration.export(args.output_folder)
class BMTuner(object):
"""
A class for tuning Stereo BM settings.
Display a normalized disparity picture from two pictures captured with a
``CalibratedPair`` and allow the user to manually tune the settings for the
``BlockMatcher``.
The settable parameters are intelligently read from the ``BlockMatcher``,
relying on the ``BlockMatcher`` exposing them as ``parameter_maxima``.
"""
#: Window to show results in
window_name = "BM Tuner"
def _set_value(self, parameter, new_value):
"""Try setting new parameter on ``block_matcher`` and update map."""
try:
self.block_matcher.__setattr__(parameter, new_value)
except BadBlockMatcherArgumentError:
return
self.update_disparity_map()
def _initialize_trackbars(self):
"""
Initialize trackbars by discovering ``block_matcher``'s parameters.
"""
for parameter in self.block_matcher.parameter_maxima.keys():
maximum = self.block_matcher.parameter_maxima[parameter]
if not maximum:
maximum = self.shortest_dimension
cv2.createTrackbar(parameter, self.window_name,
self.block_matcher.__getattribute__(parameter),
maximum,
partial(self._set_value, parameter))
def _save_bm_state(self):
"""Save current state of ``block_matcher``."""
for parameter in self.block_matcher.parameter_maxima.keys():
self.bm_settings[parameter].append(
self.block_matcher.__getattribute__(parameter))
def __init__(self, block_matcher, calibration, image_pair):
"""
Initialize tuner window and tune given pair.
``block_matcher`` is a ``BlockMatcher``, ``calibration`` is a
``StereoCalibration`` and ``image_pair`` is a rectified image pair.
"""
#: Stereo calibration to find Stereo BM settings for
self.calibration = calibration
#: (left, right) image pair to find disparity between
self.pair = image_pair
#: Block matcher to be tuned
self.block_matcher = block_matcher
#: Shortest dimension of image
self.shortest_dimension = min(self.pair[0].shape[:2])
#: Settings chosen for ``BlockMatcher``
self.bm_settings = {}
for parameter in self.block_matcher.parameter_maxima.keys():
self.bm_settings[parameter] = []
cv2.namedWindow(self.window_name)
self._initialize_trackbars()
self.tune_pair(image_pair)
def update_disparity_map(self):
"""
Update disparity map in GUI.
The disparity image is normalized to the range 0-255 and then divided by
255, because OpenCV multiplies it by 255 when displaying. This is
because the pixels are stored as floating points.
"""
disparity = self.block_matcher.get_disparity(self.pair)
norm_coeff = 255 / disparity.max()
cv2.imshow(self.window_name, disparity * norm_coeff / 255)
cv2.waitKey()
def tune_pair(self, pair):
"""Tune a pair of images."""
self._save_bm_state()
self.pair = pair
self.update_disparity_map()
def report_settings(self, parameter):
"""
Report chosen settings for ``parameter`` in ``block_matcher``.
``bm_settings`` is updated to include the latest state before work is
begun. This state is removed at the end so that the method has no side
effects. All settings are reported except for the first one on record,
which is ``block_matcher``'s default setting.
"""
self._save_bm_state()
report = []
settings_list = self.bm_settings[parameter][1:]
unique_values = list(set(settings_list))
value_frequency = {}
for value in unique_values:
value_frequency[settings_list.count(value)] = value
        frequencies = sorted(value_frequency.keys(), reverse=True)
header = "{} value | Selection frequency".format(parameter)
left_column_width = len(header[:-21])
right_column_width = 21
report.append(header)
report.append("{}|{}".format("-" * left_column_width,
"-" * right_column_width))
for frequency in frequencies:
left_column = str(value_frequency[frequency]).center(
left_column_width)
right_column = str(frequency).center(right_column_width)
report.append("{}|{}".format(left_column, right_column))
# Remove newest settings
for param in self.block_matcher.parameter_maxima.keys():
self.bm_settings[param].pop(-1)
return "\n".join(report)
|
gpl-2.0
| 6,180,144,040,576,407,000
| 39.858974
| 80
| 0.633616
| false
| 4.15515
| false
| false
| false
|
Affirm/cabot
|
cabot/cabotapp/monitor.py
|
1
|
1567
|
from celery.signals import task_success, task_failure
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
try:
from boto.ec2 import cloudwatch
if not settings.AWS_CLOUDWATCH_SYNC:
CONNECTION = None
else:
region = settings.AWS_CLOUDWATCH_REGION
access_key = settings.AWS_CLOUDWATCH_ACCESS_KEY
secret_key = settings.AWS_CLOUDWATCH_SECRET_KEY
NAMESPACE = settings.AWS_CLOUDWATCH_NAMESPACE
PREFIX = settings.AWS_CLOUDWATCH_PREFIX
CONNECTION = cloudwatch.connect_to_region(
region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
except ImportError:
NAMESPACE = None
PREFIX = None
CONNECTION = None
def _notify_cloudwatch(task_name, state):
'''
Update cloudwatch with a metric alert about a task
'''
if CONNECTION:
if PREFIX:
metric = '%s.%s.%s' % (PREFIX, task_name, state)
else:
metric = '%s.%s' % (task_name, state)
try:
CONNECTION.put_metric_data(NAMESPACE, metric, 1)
except:
logger.exception('Error sending cloudwatch metric')
@task_success.connect
def notify_success(sender=None, *args, **kwargs):
'''
Update cloudwatch about a task success
'''
_notify_cloudwatch(sender.name, 'success')
@task_failure.connect
def notify_failure(sender=None, *args, **kwargs):
'''
Update cloudwatch about a task failure
'''
_notify_cloudwatch(sender.name, 'failure')
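# Illustrative example (assumed task name, not from the original module): with
# AWS_CLOUDWATCH_PREFIX set to "cabot", a successful run of a Celery task named
# "myapp.tasks.do_work" publishes a single data point of value 1 to the metric
# "cabot.myapp.tasks.do_work.success" in the configured CloudWatch namespace.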
|
mit
| -6,029,229,335,493,826,000
| 25.116667
| 63
| 0.635609
| false
| 3.869136
| false
| false
| false
|
kylebegovich/ProjectEuler
|
Python/Solved/Page2/Problem60.py
|
1
|
2031
|
from Euler import prime_sieve, is_prime
result = int(100000000)
primes = prime_sieve(30000)
pairs = [None] * len(primes)  # per-prime cache of concatenated pairs
def concat(first, second):
return int(str(first) + str(second))
def make_pairs(list_of_primes):
pairs = list()
if list_of_primes is None:
return
for elem in list_of_primes:
for other_elem in list_of_primes:
if elem is other_elem:
continue
pairs.append(concat(elem, other_elem))
return pairs
def main():
answers = list()
for index_a in range(0, len(primes)):
if primes[index_a] * 5 >= result: break
if pairs[index_a] is None: pairs[index_a] = make_pairs([primes[index_a]])
for index_b in range(index_a, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_b]]))]):
answers.append(primes[index_b])
for index_c in range(index_b, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_c]]))]):
answers.append(primes[index_c])
for index_d in range(index_c, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_d]]))]):
answers.append(primes[index_d])
for index_e in range(index_d, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_e]]))]):
answers.append(primes[index_e])
return answers
return "Failed", answers
def test_concat():
print(concat(5, 5))
print(concat(1512, 4))
print(concat(9, 0))
def test_make_pairs():
print(make_pairs([1, 3, 5]))
print(make_pairs([7, 9]))
print(make_pairs([75, 23, 18]))
test = make_pairs([3, 7, 109, 673])
for elem in iter(test):
print(elem, is_prime(elem))
print(main())
# SOLVED : 26033
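# For reference, the widely reported solution set for Problem 60 is the primes
# 13, 5197, 5701, 6733 and 8389: every pairwise concatenation of them is prime,
# and their sum is the 26033 noted above.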
|
gpl-3.0
| 8,671,600,554,278,434,000
| 27.605634
| 113
| 0.529296
| false
| 3.47774
| false
| false
| false
|
Pavel-Durov/pynetwork
|
pynetwork/mail.py
|
1
|
6010
|
"""Script for generating mail content and sending emails to gmail accounts"""
import smtplib
import chart
import time
import fsutil
import timeutil
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders
from jinja2 import Environment
class EmailSender(object):
"""Responsible for emails sending"""
SUBJECT_EMAIL = "Here is your network check update."
GMAIL_SMTP = 'smtp.gmail.com:587'
def send_gmail(self, message_content, chart_image_path):
"""Sends gmail to specified account"""
receiver = self.__config.get_receiver_gmail_account
logging.getLogger("PYNETWORK").info("sending email to: " + receiver)
server = smtplib.SMTP(self.GMAIL_SMTP)
server.ehlo()
server.starttls()
# Record the MIME types of both parts - text/plain and text/html.
sender = self.__config.get_agent_gmail_account
msg = MIMEMultipart('alternative')
msg['Subject'] = self.SUBJECT_EMAIL
msg['From'] = sender
msg['To'] = receiver
filename = chart.get_daily_chart_path(self.__config, timeutil.utc_now())
if self.__config.get_attach_mail_chart and fsutil.file_exist(filename):
self.__attach_chart(filename, msg)
if fsutil.file_exist(chart_image_path):
fp = open(chart_image_path, 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
# Define the image's ID as referenced in html
msgImage.add_header('Content-ID', '<networkGraphImage>')
msg.attach(msgImage)
# Attach parts into message container.
msg.attach(MIMEText(message_content, 'html'))
if server.login(sender, self.__config.get_agent_gmail_password):
server.sendmail(sender, receiver, msg.as_string())
server.quit()
else:
logging.getLogger("PYNETWORK").error("could not login :(")
def __attach_chart(self, filename, msg):
attachment = open(filename, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(part)
def __init__(self, config):
self.__config = config
class MessageFormatter(object):
"""Formats email content"""
OK_CSS_CLASS = "ok"
NOT_OK_CSS_CLASS = "not-ok"
def __init__(self, config):
self.__config = config
self.MAIL_TEMPLATE_PATH = config.PROJ_PATH + "/templates/html_templates/mail_template.html"
self.__env = Environment(line_statement_prefix='%',
variable_start_string="${",
variable_end_string="}")
def format_message(self, result):
"""Formats message as html"""
html = self.__create_html(result)
if self.__config.get_write_html_file:
fsutil.write_to_file(self.__config.OUTPUT_HTML_FILE, html)
return html
def __speed_check_title_html(self, result):
if self.__config.get_downlad_constraint != self.__config.UNSET_CONSTRAINT:
download_check = result.get_download_speed < self.__config.get_downlad_constraint
else:
download_check = False
if self.__config.get_upload_constraint != self.__config.UNSET_CONSTRAINT:
upload_check = result.get_upload_speed < self.__config.get_upload_constraint
else:
upload_check = False
if self.__config.get_ping_constraint != self.__config.UNSET_CONSTRAINT:
ping_check = result.get_ping_speed < self.__config.get_ping_constraint
else:
ping_check = False
title = 'Network'
ok_status = False
if download_check or upload_check or ping_check:
multiple = False
if download_check:
title = title + " download"
multiple = True
if upload_check:
if multiple:
title = title + ", "
title = title + " upload"
multiple = True
if ping_check:
if multiple:
title = title + ", "
title = title + " ping"
multiple = True
if multiple:
title = title + " issues"
else:
title = title + " issue"
else:
title = title + ' speed is OK'
ok_status = True
return {'content': title, 'status': ok_status}
def __create_html(self, result):
title = self.__speed_check_title_html(result)
#public_ip_addr = get('https://api.ipify.org').text
bcss_class = self.OK_CSS_CLASS if title["status"] else self.NOT_OK_CSS_CLASS
html_template = fsutil.get_file_content(self.MAIL_TEMPLATE_PATH)
tmpl = self.__env.from_string(html_template)
chart_img_src = chart.get_daily_chart_image_path(self.__config, result.get_time_stamp)
return tmpl.render(css=fsutil.get_file_content(self.__config.MAIN_CSS_PATH),
title=title["content"],
body_css_class=bcss_class,
ping_speed=str(result.get_ping_speed),
upload_speed=str(result.get_upload_speed),
download_speed=str(result.get_download_speed),
upload_constraint=str(self.__config.get_upload_constraint),
download_constraint=str(self.__config.get_downlad_constraint),
ping_constraint=str(self.__config.get_ping_constraint),
time_stamp=timeutil.format_to_time_str(result.get_time_stamp),
img_src=chart_img_src)
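# Usage sketch (illustrative; the ``config`` and ``result`` objects come from
# the surrounding package and are assumed here):
#
# formatter = MessageFormatter(config)
# html = formatter.format_message(result)
# image_path = chart.get_daily_chart_image_path(config, result.get_time_stamp)
# EmailSender(config).send_gmail(html, image_path)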
|
mit
| 7,932,398,313,926,550,000
| 34.988024
| 99
| 0.582196
| false
| 4.139118
| true
| false
| false
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/horovod/horovod/tensorflow/compression.py
|
1
|
3057
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2017 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient compression algorithms."""
from enum import Enum
from functools import partial
import tensorflow as tf
class NoneCompression(object):
"""Default no-op compression."""
__instance = None
def __init__(self):
if NoneCompression.__instance is not None:
raise Exception("NoneCompression is a singleton")
else:
NoneCompression.__instance = self
def compress(self, tensor):
"""Returns the tensor unmodified."""
return tensor
def decompress(self, tensor):
"""Returns the tensor unmodified."""
return tensor
@staticmethod
def instance():
"""Returns the singleton instance."""
if NoneCompression.__instance is None:
NoneCompression()
return NoneCompression.__instance
class FP16Compression(object):
"""Compress all floating point gradients to 16-bit."""
def __init__(self, dtype):
"""Compresses tensors of the given dtype, and decompresses back."""
self._dtype = dtype
def compress(self, tensor):
"""Downcasts the tensor to 16-bit."""
if tensor.dtype != self._dtype:
raise ValueError('expected tensor of type %s but given %s' %
(str(self._dtype), str(tensor.dtype)))
tensor_compressed = tensor
if self._dtype.is_floating:
# Only allow compression from other floating point types
tensor_compressed = tf.cast(tensor, dtype=tf.float16)
return tensor_compressed
def decompress(self, tensor):
"""Upcasts the tensor to the dtype of the last compressed tensor."""
tensor_decompressed = tensor
if self._dtype.is_floating:
tensor_decompressed = tf.cast(tensor, dtype=self._dtype)
return tensor_decompressed
class Compression(Enum):
"""Optional gradient compression algorithm used during allreduce."""
"""Do not compress the gradients. This is the default."""
none = partial(lambda dtype: NoneCompression.instance())
"""Compress all floating point gradients to 16-bit."""
fp16 = partial(lambda dtype: FP16Compression(dtype))
def get_compressor(self, dtype):
"""Returns a new compressor instance for the given dtype."""
return self.value(dtype)
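# Usage sketch (illustrative, based only on the classes above; ``grads`` is an
# assumed float32 tensor):
#
# compressor = Compression.fp16.get_compressor(tf.float32)
# small = compressor.compress(grads)        # cast float32 -> float16 before allreduce
# restored = compressor.decompress(small)   # cast back to float32 afterwards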
|
apache-2.0
| -451,647,372,317,546,940
| 34.546512
| 80
| 0.649984
| false
| 4.739535
| false
| false
| false
|
fuzeman/plex.py
|
plex/objects/library/metadata/clip.py
|
1
|
1956
|
from plex.objects.core.base import Property
from plex.objects.library.metadata.base import Metadata
from plex.objects.library.metadata.photo import PhotoAlbum
from plex.objects.library.video import Video
class Clip(Video, Metadata):
grandparent = Property(resolver=lambda: Clip.construct_grandparent)
parent = Property(resolver=lambda: Clip.construct_parent)
extra_type = Property('extraType', int)
index = Property(type=int)
filename = Property
device = Property
def __repr__(self):
if self.grandparent and self.parent:
return '<Clip %r - %r - %r>' % (
self.grandparent.title,
self.parent.title,
self.title
)
if self.grandparent:
return '<Clip %r - %r>' % (
self.grandparent.title,
self.title
)
if self.parent:
return '<Clip %r - %r>' % (
self.parent.title,
self.title
)
return '<Clip %r>' % self.title
@staticmethod
def construct_grandparent(client, node):
attribute_map = {
'key': 'grandparentKey',
'ratingKey': 'grandparentRatingKey',
'index': 'grandparentIndex',
'title': 'grandparentTitle',
'art': 'grandparentArt',
'thumb': 'grandparentThumb'
}
return PhotoAlbum.construct(client, node, attribute_map, child=True)
@staticmethod
def construct_parent(client, node):
attribute_map = {
'key': 'parentKey',
'ratingKey': 'parentRatingKey',
'index': 'parentIndex',
'title': 'parentTitle',
'art': 'parentArt',
'thumb': 'parentThumb'
}
return PhotoAlbum.construct(client, node, attribute_map, child=True)
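# Illustrative example (assumed data, not from the original module): a clip with
# grandparent title "Holiday 2016", parent title "Day 1" and title "Beach" would
# repr() as "<Clip 'Holiday 2016' - 'Day 1' - 'Beach'>".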
|
mit
| 7,442,453,030,430,962,000
| 27.764706
| 76
| 0.534765
| false
| 4.289474
| false
| false
| false
|
hnlaomie/python-tools
|
util/file/build_report.py
|
1
|
2895
|
# -*- coding: utf-8 -*-
import os, sys, csv
def get_order_list(order_file: str) -> [] :
order_list = []
with open(order_file, "r") as csv_input:
reader = csv.reader(csv_input)
for row in reader:
order_list.append(row[0])
return order_list
def save_report(data: [], order_id: str, out_path: str, report_file: str):
order_file = report_file.replace("orderId", order_id)
out_file = os.path.join(out_path, order_file)
with open(out_file, "w") as csv_output:
writer = csv.writer(csv_output, lineterminator='\n')
writer.writerows(data)
def build_report(order_file: str, csv_file: str, out_path: str):
# used order map
used_order_map = {}
# row data list
row_list = []
report_file = os.path.basename(csv_file)
pre_order_id = None
# header on first line
is_header = True
header = None
with open(csv_file, "r") as csv_input:
reader = csv.reader(csv_input)
for row in reader:
if (len(row) > 0):
if is_header:
# save first line to header, first column is order_id
header = row[1:]
is_header = False
else:
order_id = row[0]
if pre_order_id == None:
pre_order_id = order_id
# save accumulated rows to file when the line moves on to the next order_id
if order_id != pre_order_id:
row_list.insert(0, header)
used_order_map[pre_order_id] = pre_order_id
save_report(row_list, pre_order_id, out_path, report_file)
row_list.clear()
pre_order_id = order_id
row_list.append(row[1:])
if pre_order_id != None:
row_list.insert(0, header)
used_order_map[pre_order_id] = pre_order_id
save_report(row_list, pre_order_id, out_path, report_file)
# save empty report with header
row_list.clear()
row_list.append(header)
order_list = get_order_list(order_file)
for order_id in order_list:
if (used_order_map.get(order_id) == None):
save_report(row_list, order_id, out_path, report_file)
"""
usage: python build_report.py [order_file] [csv_file] [out_path]
read data from csv_file, group rows by order_id and output multiple reports to out_path.
if an order has no data, output an empty report containing only the header.
order_file: file listing one orderId per line
csv_file: first column is "orderId"
out_path: report's directory
"""
if __name__ == '__main__':
if (len(sys.argv) > 3):
order_file = sys.argv[1]
csv_file = sys.argv[2]
out_path = sys.argv[3]
build_report(order_file, csv_file, out_path)
else:
print("usage: python build_report.py [order_file] [csv_file] [out_path]")
|
mit
| -8,762,075,644,433,950,000
| 32.674419
| 84
| 0.559585
| false
| 3.438242
| false
| false
| false
|
danielquinn/paperless
|
src/paperless/settings.py
|
1
|
11187
|
"""
Django settings for paperless project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import json
import os
import re
from dotenv import load_dotenv
# Tap paperless.conf if it's available
if os.path.exists("/etc/paperless.conf"):
load_dotenv("/etc/paperless.conf")
elif os.path.exists("/usr/local/etc/paperless.conf"):
load_dotenv("/usr/local/etc/paperless.conf")
def __get_boolean(key, default="NO"):
"""
Return a boolean value based on whatever the user has supplied in the
environment, judging whether the value "looks like" it is True or not.
"""
return bool(os.getenv(key, default).lower() in ("yes", "y", "1", "t", "true"))
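# For example (illustrative): with PAPERLESS_DEBUG=true in the environment,
# __get_boolean("PAPERLESS_DEBUG") returns True; with the variable unset, the
# default ("NO") is used and the call returns False.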
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# The secret key has a default that should be fine so long as you're hosting
# Paperless on a closed network. However, if you're putting this anywhere
# public, you should change the key to something unique and verbose.
SECRET_KEY = os.getenv(
"PAPERLESS_SECRET_KEY",
"e11fl1oa-*ytql8p)(06fbj4ukrlo+n7k&q5+$1md7i+mge=ee"
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = __get_boolean("PAPERLESS_DEBUG", "YES")
LOGIN_URL = "admin:login"
ALLOWED_HOSTS = ["*"]
_allowed_hosts = os.getenv("PAPERLESS_ALLOWED_HOSTS")
if _allowed_hosts:
ALLOWED_HOSTS = _allowed_hosts.split(",")
FORCE_SCRIPT_NAME = os.getenv("PAPERLESS_FORCE_SCRIPT_NAME")
# Application definition
INSTALLED_APPS = [
"whitenoise.runserver_nostatic",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"corsheaders",
"django_extensions",
"paperless",
"documents.apps.DocumentsConfig",
"reminders.apps.RemindersConfig",
"paperless_tesseract.apps.PaperlessTesseractConfig",
"paperless_text.apps.PaperlessTextConfig",
"django.contrib.admin",
"rest_framework",
"crispy_forms",
"django_filters",
"djangoql",
]
if os.getenv("PAPERLESS_INSTALLED_APPS"):
INSTALLED_APPS += os.getenv("PAPERLESS_INSTALLED_APPS").split(",")
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Enable whitenoise compression and caching
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# We allow CORS from localhost:8080
CORS_ORIGIN_WHITELIST = tuple(os.getenv("PAPERLESS_CORS_ALLOWED_HOSTS", "http://localhost:8080,https://localhost:8080").split(","))
# If auth is disabled, we just use our "bypass" authentication middleware
if bool(os.getenv("PAPERLESS_DISABLE_LOGIN", "false").lower() in ("yes", "y", "1", "t", "true")):
_index = MIDDLEWARE.index("django.contrib.auth.middleware.AuthenticationMiddleware")
MIDDLEWARE[_index] = "paperless.middleware.Middleware"
ROOT_URLCONF = 'paperless.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paperless.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(
os.getenv(
"PAPERLESS_DBDIR",
os.path.join(BASE_DIR, "..", "data")
),
"db.sqlite3"
)
}
}
if os.getenv("PAPERLESS_DBUSER"):
DATABASES["default"] = {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.getenv("PAPERLESS_DBNAME", "paperless"),
"USER": os.getenv("PAPERLESS_DBUSER"),
}
if os.getenv("PAPERLESS_DBPASS"):
DATABASES["default"]["PASSWORD"] = os.getenv("PAPERLESS_DBPASS")
if os.getenv("PAPERLESS_DBHOST"):
DATABASES["default"]["HOST"] = os.getenv("PAPERLESS_DBHOST")
if os.getenv("PAPERLESS_DBPORT"):
DATABASES["default"]["PORT"] = os.getenv("PAPERLESS_DBPORT")
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.getenv("PAPERLESS_TIME_ZONE", "UTC")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.getenv(
"PAPERLESS_STATICDIR", os.path.join(BASE_DIR, "..", "static"))
MEDIA_ROOT = os.getenv(
"PAPERLESS_MEDIADIR", os.path.join(BASE_DIR, "..", "media"))
STATIC_URL = os.getenv("PAPERLESS_STATIC_URL", "/static/")
MEDIA_URL = os.getenv("PAPERLESS_MEDIA_URL", "/media/")
# Other
# Disable Django's artificial limit on the number of form fields to submit at
# once. This is a protection against overloading the server, but since this is
# a self-hosted sort of gig, the benefits of being able to mass-delete a tonne
# of log entries outweigh the benefits of such a safeguard.
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Paperless-specific stuff
# You shouldn't have to edit any of these values. Rather, you can set these
# values in /etc/paperless.conf instead.
# ----------------------------------------------------------------------------
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"consumer": {
"class": "documents.loggers.PaperlessLogger",
}
},
"loggers": {
"documents": {
"handlers": ["consumer"],
"level": os.getenv("PAPERLESS_CONSUMER_LOG_LEVEL", "INFO"),
},
},
}
# The default language that tesseract will attempt to use when parsing
# documents. It should be a 3-letter language code consistent with ISO 639.
OCR_LANGUAGE = os.getenv("PAPERLESS_OCR_LANGUAGE", "eng")
# The number of threads to use for OCR
OCR_THREADS = os.getenv("PAPERLESS_OCR_THREADS")
# OCR all documents?
OCR_ALWAYS = __get_boolean("PAPERLESS_OCR_ALWAYS")
# If this is true, any failed attempts to OCR a PDF will result in the PDF
# being indexed anyway, with whatever we could get. If it's False, the file
# will simply be left in the CONSUMPTION_DIR.
FORGIVING_OCR = __get_boolean("PAPERLESS_FORGIVING_OCR")
# GNUPG needs a home directory for some reason
GNUPG_HOME = os.getenv("HOME", "/tmp")
# Convert is part of the ImageMagick package
CONVERT_BINARY = os.getenv("PAPERLESS_CONVERT_BINARY", "convert")
CONVERT_TMPDIR = os.getenv("PAPERLESS_CONVERT_TMPDIR")
CONVERT_MEMORY_LIMIT = os.getenv("PAPERLESS_CONVERT_MEMORY_LIMIT")
CONVERT_DENSITY = os.getenv("PAPERLESS_CONVERT_DENSITY")
# Ghostscript
GS_BINARY = os.getenv("PAPERLESS_GS_BINARY", "gs")
# OptiPNG
OPTIPNG_BINARY = os.getenv("PAPERLESS_OPTIPNG_BINARY", "optipng")
# Unpaper
UNPAPER_BINARY = os.getenv("PAPERLESS_UNPAPER_BINARY", "unpaper")
# This will be created if it doesn't exist
SCRATCH_DIR = os.getenv("PAPERLESS_SCRATCH_DIR", "/tmp/paperless")
# This is where Paperless will look for PDFs to index
CONSUMPTION_DIR = os.getenv("PAPERLESS_CONSUMPTION_DIR")
# (This setting is ignored on Linux where inotify is used instead of a
# polling loop.)
# The number of seconds that Paperless will wait between checking
# CONSUMPTION_DIR. If you tend to write documents to this directory very
# slowly, you may want to use a higher value than the default.
CONSUMER_LOOP_TIME = int(os.getenv("PAPERLESS_CONSUMER_LOOP_TIME", 10))
# Pre-2.x versions of Paperless stored your documents locally with GPG
# encryption, but that is no longer the default. This behaviour is still
# available, but it must be explicitly enabled by setting
# `PAPERLESS_PASSPHRASE` in your environment or config file. The default is to
# store these files unencrypted.
#
# Translation:
# * If you're a new user, you can safely ignore this setting.
# * If you're upgrading from 1.x, this must be set, OR you can run
# `./manage.py change_storage_type gpg unencrypted` to decrypt your files,
# after which you can unset this value.
PASSPHRASE = os.getenv("PAPERLESS_PASSPHRASE")
# Trigger a script after every successful document consumption?
PRE_CONSUME_SCRIPT = os.getenv("PAPERLESS_PRE_CONSUME_SCRIPT")
POST_CONSUME_SCRIPT = os.getenv("PAPERLESS_POST_CONSUME_SCRIPT")
# Whether to display a selected document inline, or download it as attachment:
INLINE_DOC = __get_boolean("PAPERLESS_INLINE_DOC")
# The number of items on each page in the web UI. This value must be a
# positive integer, but if you don't define one in paperless.conf, a default of
# 100 will be used.
PAPERLESS_LIST_PER_PAGE = int(os.getenv("PAPERLESS_LIST_PER_PAGE", 100))
FY_START = os.getenv("PAPERLESS_FINANCIAL_YEAR_START")
FY_END = os.getenv("PAPERLESS_FINANCIAL_YEAR_END")
# Specify the default date order (for autodetected dates)
DATE_ORDER = os.getenv("PAPERLESS_DATE_ORDER", "DMY")
FILENAME_DATE_ORDER = os.getenv("PAPERLESS_FILENAME_DATE_ORDER")
# Transformations applied before filename parsing
FILENAME_PARSE_TRANSFORMS = []
for t in json.loads(os.getenv("PAPERLESS_FILENAME_PARSE_TRANSFORMS", "[]")):
FILENAME_PARSE_TRANSFORMS.append((re.compile(t["pattern"]), t["repl"]))
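# For example (illustrative), setting
# PAPERLESS_FILENAME_PARSE_TRANSFORMS='[{"pattern": "^scan_", "repl": ""}]'
# compiles one transform that strips a leading "scan_" from filenames before
# they are parsed.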
# Specify for how many years a correspondent is considered recent. Recent
# correspondents will be shown in a separate "Recent correspondents" filter as
# well. Set to 0 to disable this filter.
PAPERLESS_RECENT_CORRESPONDENT_YEARS = int(os.getenv(
"PAPERLESS_RECENT_CORRESPONDENT_YEARS", 0))
# Specify the filename format for out files
PAPERLESS_FILENAME_FORMAT = os.getenv("PAPERLESS_FILENAME_FORMAT")
|
gpl-3.0
| -3,477,589,117,054,092,300
| 32
| 131
| 0.699026
| false
| 3.353417
| false
| false
| false
|
rbuffat/pyidf
|
tests/test_externalinterfacefunctionalmockupunitexporttoschedule.py
|
1
|
1925
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.external_interface import ExternalInterfaceFunctionalMockupUnitExportToSchedule
log = logging.getLogger(__name__)
class TestExternalInterfaceFunctionalMockupUnitExportToSchedule(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_externalinterfacefunctionalmockupunitexporttoschedule(self):
pyidf.validation_level = ValidationLevel.error
obj = ExternalInterfaceFunctionalMockupUnitExportToSchedule()
# alpha
var_schedule_name = "Schedule Name"
obj.schedule_name = var_schedule_name
# object-list
var_schedule_type_limits_names = "object-list|Schedule Type Limits Names"
obj.schedule_type_limits_names = var_schedule_type_limits_names
# alpha
var_fmu_variable_name = "FMU Variable Name"
obj.fmu_variable_name = var_fmu_variable_name
# real
var_initial_value = 4.4
obj.initial_value = var_initial_value
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].schedule_name, var_schedule_name)
self.assertEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].schedule_type_limits_names, var_schedule_type_limits_names)
self.assertEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].fmu_variable_name, var_fmu_variable_name)
self.assertAlmostEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].initial_value, var_initial_value)
|
apache-2.0
| 7,666,285,236,414,696,000
| 37.52
| 147
| 0.720519
| false
| 3.767123
| false
| false
| false
|
facebookexperimental/eden
|
eden/hg-server/tests/test-command-template-t.py
|
1
|
121759
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# isort:skip_file
from __future__ import absolute_import
import datetime
import os
import sys
import warnings
from edenscm.mercurial import namespaces
from edenscm.mercurial import pycompat
from testutil.autofix import eq
from testutil.dott import feature, sh, testtmp # noqa: F401
is_py3 = sys.version_info[0] >= 3
sh % "setconfig 'extensions.treemanifest=!'"
sh % "setconfig 'ui.allowemptycommit=1'"
sh % "hg init a"
sh % "cd a"
sh % "echo a" > "a"
sh % "hg add a"
sh % "echo line 1" > "b"
sh % "echo line 2" >> "b"
sh % "hg commit -l b -d '1000000 0' -u 'User Name <user@hostname>'"
sh % "hg add b"
sh % "echo other 1" > "c"
sh % "echo other 2" >> "c"
sh % "echo" >> "c"
sh % "echo other 3" >> "c"
sh % "hg commit -l c -d '1100000 0' -u 'A. N. Other <other@place>'"
sh % "hg add c"
sh % "hg commit -m 'no person' -d '1200000 0' -u 'other@place'"
sh % "echo c" >> "c"
sh % "hg commit -m 'no user, no domain' -d '1300000 0' -u person"
sh % "hg commit -m 'new branch' -d '1400000 0' -u person"
sh % "hg bookmark foo"
sh % "hg co -q 3"
sh % "echo other 4" >> "d"
sh % "hg add d"
sh % "hg commit -m 'new head' -d '1500000 0' -u person"
sh % "hg merge -q foo"
sh % "hg commit -m merge -d '1500001 0' -u person"
sh % "hg log -r . -T '{username}'" == "test"
# Test arithmetic operators have the right precedence:
sh % 'hg log -l 1 -T \'{date(date, "%Y") + 5 * 10} {date(date, "%Y") - 2 * 3}\\n\'' == "2020 1964"
sh % 'hg log -l 1 -T \'{date(date, "%Y") * 5 + 10} {date(date, "%Y") * 3 - 2}\\n\'' == "9860 5908"
# Test division:
sh % "hg debugtemplate -r0 -v '{5 / 2} {mod(5, 2)}\\n'" == r"""
(template
(/
(integer '5')
(integer '2'))
(string ' ')
(func
(symbol 'mod')
(list
(integer '5')
(integer '2')))
(string '\n'))
2 1"""
sh % "hg debugtemplate -r0 -v '{5 / -2} {mod(5, -2)}\\n'" == r"""
(template
(/
(integer '5')
(negate
(integer '2')))
(string ' ')
(func
(symbol 'mod')
(list
(integer '5')
(negate
(integer '2'))))
(string '\n'))
-3 -1"""
sh % "hg debugtemplate -r0 -v '{-5 / 2} {mod(-5, 2)}\\n'" == r"""
(template
(/
(negate
(integer '5'))
(integer '2'))
(string ' ')
(func
(symbol 'mod')
(list
(negate
(integer '5'))
(integer '2')))
(string '\n'))
-3 1"""
sh % "hg debugtemplate -r0 -v '{-5 / -2} {mod(-5, -2)}\\n'" == r"""
(template
(/
(negate
(integer '5'))
(negate
(integer '2')))
(string ' ')
(func
(symbol 'mod')
(list
(negate
(integer '5'))
(negate
(integer '2'))))
(string '\n'))
2 -1"""
# Filters bind closer than arithmetic:
sh % "hg debugtemplate -r0 -v '{revset(\".\")|count - 1}\\n'" == r"""
(template
(-
(|
(func
(symbol 'revset')
(string '.'))
(symbol 'count'))
(integer '1'))
(string '\n'))
0"""
# But negate binds closer still:
sh % "hg debugtemplate -r0 -v '{1-3|stringify}\\n'" == r"""
(template
(-
(integer '1')
(|
(integer '3')
(symbol 'stringify')))
(string '\n'))
hg: parse error: arithmetic only defined on integers
[255]"""
sh % "hg debugtemplate -r0 -v '{-3|stringify}\\n'" == r"""
(template
(|
(negate
(integer '3'))
(symbol 'stringify'))
(string '\n'))
-3"""
# Filters bind as close as map operator:
sh % "hg debugtemplate -r0 -v '{desc|splitlines % \"{line}\\n\"}'" == r"""
(template
(%
(|
(symbol 'desc')
(symbol 'splitlines'))
(template
(symbol 'line')
(string '\n'))))
line 1
line 2"""
# Keyword arguments:
sh % "hg debugtemplate -r0 -v '{foo=bar|baz}'" == r"""
(template
(keyvalue
(symbol 'foo')
(|
(symbol 'bar')
(symbol 'baz'))))
hg: parse error: can't use a key-value pair in this context
[255]"""
sh % "hg debugtemplate '{pad(\"foo\", width=10, left=true)}\\n'" == " foo"
# Call function which takes named arguments by filter syntax:
sh % "hg debugtemplate '{\" \"|separate}'"
sh % 'hg debugtemplate \'{("not", "an", "argument", "list")|separate}\'' == r"""
hg: parse error: unknown method 'list'
[255]"""
# Second branch starting at nullrev:
sh % "hg update null" == "0 files updated, 0 files merged, 4 files removed, 0 files unresolved"
with open("second", "wb") as f:
# Valid utf-8 character
if is_py3:
f.write("🥈".encode("utf-8"))
else:
f.write("🥈")
# Invalid utf-8 character
f.write(b"\xe2\x28\xa1")
f.write(b"\n")
sh % "hg add second"
sh % "hg commit -m second -d '1000000 0' -u 'User Name <user@hostname>'"
sh % "echo third" > "third"
sh % "hg add third"
sh % "hg mv second fourth"
sh % "hg commit -m third -d '2020-01-01 10:01 UTC'"
sh % "hg log --template '{join(file_copies, \",\\n\")}\\n' -r ." == "fourth (second)"
sh % "hg log -T '{file_copies % \"{source} -> {name}\\n\"}' -r ." == "second -> fourth"
sh % 'hg log -T \'{rev} {ifcontains("fourth", file_copies, "t", "f")}\\n\' -r \'.:7\'' == r"""
8 t
7 f"""
# Working-directory revision has special identifiers, though they are still
# experimental:
sh % "hg log -r 'wdir()' -T '{rev}:{node}\\n'" == "2147483647:ffffffffffffffffffffffffffffffffffffffff"
# Some keywords are invalid for working-directory revision, but they should
# never cause crash:
sh % "hg log -r 'wdir()' -T '{manifest}\\n'"
# Quoting for ui.logtemplate
sh % "hg tip --config 'ui.logtemplate={rev}\\n'" == "8"
sh % "hg tip --config 'ui.logtemplate='\\''{rev}\\n'\\'''" == "8"
sh % "hg tip --config 'ui.logtemplate=\"{rev}\\n\"'" == "8"
sh % "hg tip --config 'ui.logtemplate=n{rev}\\n'" == "n8"
# Make sure user/global hgrc does not affect tests
sh % "echo '[ui]'" > ".hg/hgrc"
sh % "echo 'logtemplate ='" >> ".hg/hgrc"
sh % "echo 'style ='" >> ".hg/hgrc"
# Add some simple styles to settings
(
sh % "cat"
<< r"""
[templates]
simple = "{rev}\n"
simple2 = {rev}\n
rev = "should not precede {rev} keyword\n"
"""
>> ".hg/hgrc"
)
sh % "hg log -l1 -Tsimple" == "8"
sh % "hg log -l1 -Tsimple2" == "8"
sh % "hg log -l1 -Trev" == "should not precede 8 keyword"
sh % "hg log -l1 -T '{simple}'" == "8"
# Map file shouldn't see user templates:
sh % "cat" << r"""
changeset = 'nothing expanded:{simple}\n'
""" > "tmpl"
sh % "hg log -l1 --style ./tmpl" == "nothing expanded:"
# Test templates and style maps in files:
sh % "echo '{rev}'" > "tmpl"
sh % "hg log -l1 -T./tmpl" == "8"
sh % "hg log -l1 -Tblah/blah" == "blah/blah"
sh % "echo 'changeset = \"{rev}\\n\"'" > "map-simple"
sh % "hg log -l1 -T./map-simple" == "8"
# a map file may have [templates] and [templatealias] sections:
sh % "cat" << r"""
[templates]
changeset = "{a}\n"
[templatealias]
a = rev
""" > "map-simple"
sh % "hg log -l1 -T./map-simple" == "8"
# so it can be included in hgrc
sh % "cat" << r"""
%include map-simple
[templates]
foo = "{changeset}"
""" > "myhgrc"
sh % "'HGRCPATH=./myhgrc' hg log -l1 -Tfoo" == "8"
sh % "'HGRCPATH=./myhgrc' hg log -l1 '-T{a}\\n'" == "8"
# Test template map inheritance
sh % "echo '__base__ = map-cmdline.default'" > "map-simple"
sh % "echo 'cset = \"changeset: ***{rev}***\\n\"'" >> "map-simple"
sh % "hg log -l1 -T./map-simple" == r"""
changeset: ***8***
user: test
date: Wed Jan 01 10:01:00 2020 +0000
summary: third"""
# Test docheader, docfooter and separator in template map
sh % "cat" << r"""
docheader = '\{\n'
docfooter = '\n}\n'
separator = ',\n'
changeset = ' {dict(rev, node|short)|json}'
""" > "map-myjson"
sh % "hg log -l2 -T./map-myjson" == r"""
{
{"node": "209edb6a1848", "rev": 8},
{"node": "88058a185da2", "rev": 7}
}"""
# Test docheader, docfooter and separator in [templates] section
(
sh % "cat"
<< r"""
[templates]
myjson = ' {dict(rev, node|short)|json}'
myjson:docheader = '\{\n'
myjson:docfooter = '\n}\n'
myjson:separator = ',\n'
:docheader = 'should not be selected as a docheader for literal templates\n'
"""
>> ".hg/hgrc"
)
sh % "hg log -l2 -Tmyjson" == r"""
{
{"node": "209edb6a1848", "rev": 8},
{"node": "88058a185da2", "rev": 7}
}"""
sh % "hg log -l1 '-T{rev}\\n'" == "8"
# Template should precede style option
sh % "hg log -l1 --style default -T '{rev}\\n'" == "8"
# Add a commit with empty description, to ensure that the templates
# below will omit the description line.
sh % "echo c" >> "c"
sh % "hg add c"
sh % "hg commit -qm ' '"
# Remove commit with empty commit message, so as to not pollute further
# tests.
sh % "hg debugstrip -q ."
# Revision with no copies (used to print a traceback):
sh % "hg tip -v --template '\\n'"
# Compact style works:
sh % "hg log -Tcompact" == r"""
209edb6a1848 2020-01-01 10:01 +0000 test
third
88058a185da2 1970-01-12 13:46 +0000 user
second
f7e5795620e7 1970-01-18 08:40 +0000 person
merge
13207e5a10d9 1970-01-18 08:40 +0000 person
new head
[foo] 07fa1db10648 1970-01-17 04:53 +0000 person
new branch
10e46f2dcbf4 1970-01-16 01:06 +0000 person
no user, no domain
97054abb4ab8 1970-01-14 21:20 +0000 other
no person
b608e9d1a3f0 1970-01-13 17:33 +0000 other
other 1
1e4e1b8f71e0 1970-01-12 13:46 +0000 user
line 1"""
sh % "hg log -v --style compact" == r"""
209edb6a1848 2020-01-01 10:01 +0000 test
third
88058a185da2 1970-01-12 13:46 +0000 User Name <user@hostname>
second
f7e5795620e7 1970-01-18 08:40 +0000 person
merge
13207e5a10d9 1970-01-18 08:40 +0000 person
new head
07fa1db10648 1970-01-17 04:53 +0000 person
new branch
10e46f2dcbf4 1970-01-16 01:06 +0000 person
no user, no domain
97054abb4ab8 1970-01-14 21:20 +0000 other@place
no person
b608e9d1a3f0 1970-01-13 17:33 +0000 A. N. Other <other@place>
other 1
other 2
other 3
1e4e1b8f71e0 1970-01-12 13:46 +0000 User Name <user@hostname>
line 1
line 2"""
sh % "hg log --debug --style compact" == r"""
209edb6a1848 2020-01-01 10:01 +0000 test
third
88058a185da2 1970-01-12 13:46 +0000 User Name <user@hostname>
second
f7e5795620e7 1970-01-18 08:40 +0000 person
merge
13207e5a10d9 1970-01-18 08:40 +0000 person
new head
07fa1db10648 1970-01-17 04:53 +0000 person
new branch
10e46f2dcbf4 1970-01-16 01:06 +0000 person
no user, no domain
97054abb4ab8 1970-01-14 21:20 +0000 other@place
no person
b608e9d1a3f0 1970-01-13 17:33 +0000 A. N. Other <other@place>
other 1
other 2
other 3
1e4e1b8f71e0 1970-01-12 13:46 +0000 User Name <user@hostname>
line 1
line 2"""
# Test xml styles:
sh % "hg log --style xml -r 'not all()'" == r"""
<?xml version="1.0"?>
<log>
</log>"""
sh % "hg log --style xml" == r"""
<?xml version="1.0"?>
<log>
<logentry node="209edb6a18483c1434e4006bca4c2b1ee5e7090a">
<author email="test">test</author>
<date>2020-01-01T10:01:00+00:00</date>
<msg xml:space="preserve">third</msg>
</logentry>
<logentry node="88058a185da202d22e8ee0bb4d3515ff0ecb222b">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">second</msg>
</logentry>
<logentry node="f7e5795620e78993ad76680c4306bb2da83907b3">
<author email="person">person</author>
<date>1970-01-18T08:40:01+00:00</date>
<msg xml:space="preserve">merge</msg>
</logentry>
<logentry node="13207e5a10d9fd28ec424934298e176197f2c67f">
<author email="person">person</author>
<date>1970-01-18T08:40:00+00:00</date>
<msg xml:space="preserve">new head</msg>
</logentry>
<logentry node="07fa1db1064879a32157227401eb44b322ae53ce">
<bookmark>foo</bookmark>
<author email="person">person</author>
<date>1970-01-17T04:53:20+00:00</date>
<msg xml:space="preserve">new branch</msg>
</logentry>
<logentry node="10e46f2dcbf4823578cf180f33ecf0b957964c47">
<author email="person">person</author>
<date>1970-01-16T01:06:40+00:00</date>
<msg xml:space="preserve">no user, no domain</msg>
</logentry>
<logentry node="97054abb4ab824450e9164180baf491ae0078465">
<author email="other@place">other</author>
<date>1970-01-14T21:20:00+00:00</date>
<msg xml:space="preserve">no person</msg>
</logentry>
<logentry node="b608e9d1a3f0273ccf70fb85fd6866b3482bf965">
<author email="other@place">A. N. Other</author>
<date>1970-01-13T17:33:20+00:00</date>
<msg xml:space="preserve">other 1
other 2
other 3</msg>
</logentry>
<logentry node="1e4e1b8f71e05681d422154f5421e385fec3454f">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">line 1
line 2</msg>
</logentry>
</log>"""
sh % "hg log -v --style xml" == r"""
<?xml version="1.0"?>
<log>
<logentry node="209edb6a18483c1434e4006bca4c2b1ee5e7090a">
<author email="test">test</author>
<date>2020-01-01T10:01:00+00:00</date>
<msg xml:space="preserve">third</msg>
<paths>
<path action="A">fourth</path>
<path action="A">third</path>
<path action="R">second</path>
</paths>
<copies>
<copy source="second">fourth</copy>
</copies>
</logentry>
<logentry node="88058a185da202d22e8ee0bb4d3515ff0ecb222b">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">second</msg>
<paths>
<path action="A">second</path>
</paths>
</logentry>
<logentry node="f7e5795620e78993ad76680c4306bb2da83907b3">
<author email="person">person</author>
<date>1970-01-18T08:40:01+00:00</date>
<msg xml:space="preserve">merge</msg>
<paths>
</paths>
</logentry>
<logentry node="13207e5a10d9fd28ec424934298e176197f2c67f">
<author email="person">person</author>
<date>1970-01-18T08:40:00+00:00</date>
<msg xml:space="preserve">new head</msg>
<paths>
<path action="A">d</path>
</paths>
</logentry>
<logentry node="07fa1db1064879a32157227401eb44b322ae53ce">
<bookmark>foo</bookmark>
<author email="person">person</author>
<date>1970-01-17T04:53:20+00:00</date>
<msg xml:space="preserve">new branch</msg>
<paths>
</paths>
</logentry>
<logentry node="10e46f2dcbf4823578cf180f33ecf0b957964c47">
<author email="person">person</author>
<date>1970-01-16T01:06:40+00:00</date>
<msg xml:space="preserve">no user, no domain</msg>
<paths>
<path action="M">c</path>
</paths>
</logentry>
<logentry node="97054abb4ab824450e9164180baf491ae0078465">
<author email="other@place">other</author>
<date>1970-01-14T21:20:00+00:00</date>
<msg xml:space="preserve">no person</msg>
<paths>
<path action="A">c</path>
</paths>
</logentry>
<logentry node="b608e9d1a3f0273ccf70fb85fd6866b3482bf965">
<author email="other@place">A. N. Other</author>
<date>1970-01-13T17:33:20+00:00</date>
<msg xml:space="preserve">other 1
other 2
other 3</msg>
<paths>
<path action="A">b</path>
</paths>
</logentry>
<logentry node="1e4e1b8f71e05681d422154f5421e385fec3454f">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">line 1
line 2</msg>
<paths>
<path action="A">a</path>
</paths>
</logentry>
</log>"""
sh % "hg log --debug --style xml" == r"""
<?xml version="1.0"?>
<log>
<logentry node="209edb6a18483c1434e4006bca4c2b1ee5e7090a">
<author email="test">test</author>
<date>2020-01-01T10:01:00+00:00</date>
<msg xml:space="preserve">third</msg>
<paths>
<path action="A">fourth</path>
<path action="A">third</path>
<path action="R">second</path>
</paths>
<copies>
<copy source="second">fourth</copy>
</copies>
<extra key="branch">default</extra>
</logentry>
<logentry node="88058a185da202d22e8ee0bb4d3515ff0ecb222b">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">second</msg>
<paths>
<path action="A">second</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="f7e5795620e78993ad76680c4306bb2da83907b3">
<author email="person">person</author>
<date>1970-01-18T08:40:01+00:00</date>
<msg xml:space="preserve">merge</msg>
<paths>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="13207e5a10d9fd28ec424934298e176197f2c67f">
<author email="person">person</author>
<date>1970-01-18T08:40:00+00:00</date>
<msg xml:space="preserve">new head</msg>
<paths>
<path action="A">d</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="07fa1db1064879a32157227401eb44b322ae53ce">
<bookmark>foo</bookmark>
<author email="person">person</author>
<date>1970-01-17T04:53:20+00:00</date>
<msg xml:space="preserve">new branch</msg>
<paths>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="10e46f2dcbf4823578cf180f33ecf0b957964c47">
<author email="person">person</author>
<date>1970-01-16T01:06:40+00:00</date>
<msg xml:space="preserve">no user, no domain</msg>
<paths>
<path action="M">c</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="97054abb4ab824450e9164180baf491ae0078465">
<author email="other@place">other</author>
<date>1970-01-14T21:20:00+00:00</date>
<msg xml:space="preserve">no person</msg>
<paths>
<path action="A">c</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="b608e9d1a3f0273ccf70fb85fd6866b3482bf965">
<author email="other@place">A. N. Other</author>
<date>1970-01-13T17:33:20+00:00</date>
<msg xml:space="preserve">other 1
other 2
other 3</msg>
<paths>
<path action="A">b</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="1e4e1b8f71e05681d422154f5421e385fec3454f">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">line 1
line 2</msg>
<paths>
<path action="A">a</path>
</paths>
<extra key="branch">default</extra>
</logentry>
</log>"""
# Test JSON style:
sh % "hg log -k nosuch -Tjson" == "[]"
sh % "hg log -qr . -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a"
}
]"""
sh % "hg log -vpr . -Tjson --stat" == (
r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"files": ["fourth", "second", "third"],
"diffstat": " fourth | 1 +\n second | 1 -\n third | 1 +\n 3 files changed, 2 insertions(+), 1 deletions(-)\n","""
+ (
'\n "diff": "diff -r 88058a185da2 -r 209edb6a1848 fourth\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/fourth\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+🥈\udced\udcb3\udca2(\udced\udcb2\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 second\\n--- a/second\\tMon Jan 12 13:46:40 1970 +0000\\n+++ /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n@@ -1,1 +0,0 @@\\n-🥈\udced\udcb3\udca2(\udced\udcb2\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 third\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/third\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+third\\n"\n'
if is_py3
else '\n "diff": "diff -r 88058a185da2 -r 209edb6a1848 fourth\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/fourth\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+🥈\xed\xb3\xa2(\xed\xb2\xa1\\ndiff -r 88058a185da2 -r 209edb6a1848 second\\n--- a/second\\tMon Jan 12 13:46:40 1970 +0000\\n+++ /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n@@ -1,1 +0,0 @@\\n-🥈\xed\xb3\xa2(\xed\xb2\xa1\\ndiff -r 88058a185da2 -r 209edb6a1848 third\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/third\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+third\\n"\n'
)
+ r""" }
]"""
)
# honor --git but not format-breaking diffopts
sh % "hg --config 'diff.noprefix=True' log --git -vpr . -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"files": ["fourth", "second", "third"],
"diff": "diff --git a/second b/fourth\nrename from second\nrename to fourth\ndiff --git a/third b/third\nnew file mode 100644\n--- /dev/null\n+++ b/third\n@@ -0,0 +1,1 @@\n+third\n"
}
]"""
sh % "hg log -T json" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"]
},
{
"rev": 7,
"node": "88058a185da202d22e8ee0bb4d3515ff0ecb222b",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "second",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"]
},
{
"rev": 6,
"node": "f7e5795620e78993ad76680c4306bb2da83907b3",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500001, 0],
"desc": "merge",
"bookmarks": [],
"parents": ["13207e5a10d9fd28ec424934298e176197f2c67f", "07fa1db1064879a32157227401eb44b322ae53ce"]
},
{
"rev": 5,
"node": "13207e5a10d9fd28ec424934298e176197f2c67f",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500000, 0],
"desc": "new head",
"bookmarks": [],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"]
},
{
"rev": 4,
"node": "07fa1db1064879a32157227401eb44b322ae53ce",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1400000, 0],
"desc": "new branch",
"bookmarks": ["foo"],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"]
},
{
"rev": 3,
"node": "10e46f2dcbf4823578cf180f33ecf0b957964c47",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1300000, 0],
"desc": "no user, no domain",
"bookmarks": [],
"parents": ["97054abb4ab824450e9164180baf491ae0078465"]
},
{
"rev": 2,
"node": "97054abb4ab824450e9164180baf491ae0078465",
"branch": "default",
"phase": "draft",
"user": "other@place",
"date": [1200000, 0],
"desc": "no person",
"bookmarks": [],
"parents": ["b608e9d1a3f0273ccf70fb85fd6866b3482bf965"]
},
{
"rev": 1,
"node": "b608e9d1a3f0273ccf70fb85fd6866b3482bf965",
"branch": "default",
"phase": "draft",
"user": "A. N. Other <other@place>",
"date": [1100000, 0],
"desc": "other 1\nother 2\n\nother 3",
"bookmarks": [],
"parents": ["1e4e1b8f71e05681d422154f5421e385fec3454f"]
},
{
"rev": 0,
"node": "1e4e1b8f71e05681d422154f5421e385fec3454f",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "line 1\nline 2",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"]
}
]"""
sh % "hg heads -v -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"files": ["fourth", "second", "third"]
},
{
"rev": 6,
"node": "f7e5795620e78993ad76680c4306bb2da83907b3",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500001, 0],
"desc": "merge",
"bookmarks": [],
"parents": ["13207e5a10d9fd28ec424934298e176197f2c67f", "07fa1db1064879a32157227401eb44b322ae53ce"],
"files": []
}
]"""
sh % "hg log --debug -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"manifest": "102f85d6546830d0894e5420cdddaa12fe270c02",
"extra": {"branch": "default"},
"modified": [],
"added": ["fourth", "third"],
"removed": ["second"]
},
{
"rev": 7,
"node": "88058a185da202d22e8ee0bb4d3515ff0ecb222b",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "second",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"],
"manifest": "e3aa144e25d914ea34006bd7b3c266b7eb283c61",
"extra": {"branch": "default"},
"modified": [],
"added": ["second"],
"removed": []
},
{
"rev": 6,
"node": "f7e5795620e78993ad76680c4306bb2da83907b3",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500001, 0],
"desc": "merge",
"bookmarks": [],
"parents": ["13207e5a10d9fd28ec424934298e176197f2c67f", "07fa1db1064879a32157227401eb44b322ae53ce"],
"manifest": "4dc3def4f9b4c6e8de820f6ee74737f91e96a216",
"extra": {"branch": "default"},
"modified": [],
"added": [],
"removed": []
},
{
"rev": 5,
"node": "13207e5a10d9fd28ec424934298e176197f2c67f",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500000, 0],
"desc": "new head",
"bookmarks": [],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"],
"manifest": "4dc3def4f9b4c6e8de820f6ee74737f91e96a216",
"extra": {"branch": "default"},
"modified": [],
"added": ["d"],
"removed": []
},
{
"rev": 4,
"node": "07fa1db1064879a32157227401eb44b322ae53ce",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1400000, 0],
"desc": "new branch",
"bookmarks": ["foo"],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"],
"manifest": "cb5a1327723bada42f117e4c55a303246eaf9ccc",
"extra": {"branch": "default"},
"modified": [],
"added": [],
"removed": []
},
{
"rev": 3,
"node": "10e46f2dcbf4823578cf180f33ecf0b957964c47",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1300000, 0],
"desc": "no user, no domain",
"bookmarks": [],
"parents": ["97054abb4ab824450e9164180baf491ae0078465"],
"manifest": "cb5a1327723bada42f117e4c55a303246eaf9ccc",
"extra": {"branch": "default"},
"modified": ["c"],
"added": [],
"removed": []
},
{
"rev": 2,
"node": "97054abb4ab824450e9164180baf491ae0078465",
"branch": "default",
"phase": "draft",
"user": "other@place",
"date": [1200000, 0],
"desc": "no person",
"bookmarks": [],
"parents": ["b608e9d1a3f0273ccf70fb85fd6866b3482bf965"],
"manifest": "6e0e82995c35d0d57a52aca8da4e56139e06b4b1",
"extra": {"branch": "default"},
"modified": [],
"added": ["c"],
"removed": []
},
{
"rev": 1,
"node": "b608e9d1a3f0273ccf70fb85fd6866b3482bf965",
"branch": "default",
"phase": "draft",
"user": "A. N. Other <other@place>",
"date": [1100000, 0],
"desc": "other 1\nother 2\n\nother 3",
"bookmarks": [],
"parents": ["1e4e1b8f71e05681d422154f5421e385fec3454f"],
"manifest": "4e8d705b1e53e3f9375e0e60dc7b525d8211fe55",
"extra": {"branch": "default"},
"modified": [],
"added": ["b"],
"removed": []
},
{
"rev": 0,
"node": "1e4e1b8f71e05681d422154f5421e385fec3454f",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "line 1\nline 2",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"],
"manifest": "a0c8bcbbb45c63b90b70ad007bf38961f64f2af0",
"extra": {"branch": "default"},
"modified": [],
"added": ["a"],
"removed": []
}
]"""
# Error if style not readable:
if feature.check(["unix-permissions", "no-root"]):
sh % "touch q"
os.chmod("q", 0)
sh % "hg log --style ./q" == r"""
abort: Permission denied: ./q
(current process runs with uid 42)
(./q: mode 0o52, uid 42, gid 42)
(.: mode 0o52, uid 42, gid 42)
[255]"""
# Error if no style:
sh % "hg log --style notexist" == r"""
abort: style 'notexist' not found
(available styles: bisect, changelog, compact, default, phases, show, status, xml)
[255]"""
sh % "hg log -T list" == r"""
available styles: bisect, changelog, compact, default, phases, show, status, xml
abort: specify a template
[255]"""
# Error if style missing key:
sh % "echo 'q = q'" > "t"
sh % "hg log --style ./t" == r"""
abort: "changeset" not in template map
[255]"""
# Error if style missing value:
sh % "echo 'changeset ='" > "t"
sh % "hg log --style t" == r"""
hg: parse error at t:1: missing value
[255]"""
# Error if include fails:
sh % "echo 'changeset = q'" >> "t"
if feature.check(["unix-permissions", "no-root"]):
sh % "hg log --style ./t" == r"""
abort: template file ./q: Permission denied
[255]"""
sh % "rm -f q"
# Include works:
sh % "echo '{rev}'" > "q"
sh % "hg log --style ./t" == r"""
8
7
6
5
4
3
2
1
0"""
# Check that recursive reference does not fall into RuntimeError (issue4758):
# common mistake:
sh % "cat" << r"""
changeset = '{changeset}\n'
""" > "issue4758"
sh % "hg log --style ./issue4758" == r"""
abort: recursive reference 'changeset' in template
[255]"""
# circular reference:
sh % "cat" << r"""
changeset = '{foo}'
foo = '{changeset}'
""" > "issue4758"
sh % "hg log --style ./issue4758" == r"""
abort: recursive reference 'foo' in template
[255]"""
# buildmap() -> gettemplate(), where no thunk was made:
sh % "cat" << r"""
changeset = '{files % changeset}\n'
""" > "issue4758"
sh % "hg log --style ./issue4758" == r"""
abort: recursive reference 'changeset' in template
[255]"""
# not a recursion if a keyword of the same name exists:
sh % "cat" << r"""
changeset = '{bookmarks % rev}'
rev = '{rev} {bookmark}\n'
""" > "issue4758"
sh % "hg log --style ./issue4758 -r tip" == ""
# Check that {phase} works correctly on parents:
sh % "cat" << r"""
changeset_debug = '{rev} ({phase}):{parents}\n'
parent = ' {rev} ({phase})'
""" > "parentphase"
sh % "hg debugmakepublic 5"
sh % "hg log --debug -G --style ./parentphase" == r"""
@ 8 (draft): 7 (draft)
│
o 7 (draft): -1 (public)
o 6 (draft): 5 (public) 4 (draft)
├─╮
│ o 5 (public): 3 (public)
│ │
o │ 4 (draft): 3 (public)
├─╯
o 3 (public): 2 (public)
│
o 2 (public): 1 (public)
│
o 1 (public): 0 (public)
│
o 0 (public): -1 (public)"""
# Missing non-standard names give no error (backward compatibility):
sh % "echo 'changeset = '\\''{c}'\\'''" > "t"
sh % "hg log --style ./t"
# Defining non-standard name works:
sh % "cat" << r"""
changeset = '{c}'
c = q
""" > "t"
sh % "hg log --style ./t" == r"""
8
7
6
5
4
3
2
1
0"""
# ui.style works:
sh % "echo '[ui]'" > ".hg/hgrc"
sh % "echo 'style = t'" >> ".hg/hgrc"
sh % "hg log" == r"""
8
7
6
5
4
3
2
1
0"""
# Issue338:
sh % "hg log '--style=changelog'" > "changelog"
sh % "cat changelog" == r"""
2020-01-01 test <test>
* fourth, second, third:
third
[209edb6a1848]
1970-01-12 User Name <user@hostname>
* second:
second
[88058a185da2]
1970-01-18 person <person>
* merge
[f7e5795620e7]
* d:
new head
[13207e5a10d9]
1970-01-17 person <person>
* new branch
[07fa1db10648]
1970-01-16 person <person>
* c:
no user, no domain
[10e46f2dcbf4]
1970-01-14 other <other@place>
* c:
no person
[97054abb4ab8]
1970-01-13 A. N. Other <other@place>
* b:
other 1 other 2
other 3
[b608e9d1a3f0]
1970-01-12 User Name <user@hostname>
* a:
line 1 line 2
[1e4e1b8f71e0]"""
# Issue2130: xml output for 'hg heads' is malformed
sh % "hg heads --style changelog" == r"""
2020-01-01 test <test>
* fourth, second, third:
third
[209edb6a1848]
1970-01-18 person <person>
* merge
[f7e5795620e7]"""
# Keys work:
out = []
for (
key
) in "author branch branches date desc file_adds file_dels file_mods file_copies file_copies_switch files manifest node parents rev diffstat extras p1rev p2rev p1node p2node".split():
for mode in ["", "--verbose", "--debug"]:
args = ["log", mode, "-T", "%s%s: {%s}\\n" % (key, mode, key)]
out += [l.strip() for l in sh.hg(*args).splitlines()]
eq(
"\n".join(out),
r"""
author: test
author: User Name <user@hostname>
author: person
author: person
author: person
author: person
author: other@place
author: A. N. Other <other@place>
author: User Name <user@hostname>
author--verbose: test
author--verbose: User Name <user@hostname>
author--verbose: person
author--verbose: person
author--verbose: person
author--verbose: person
author--verbose: other@place
author--verbose: A. N. Other <other@place>
author--verbose: User Name <user@hostname>
author--debug: test
author--debug: User Name <user@hostname>
author--debug: person
author--debug: person
author--debug: person
author--debug: person
author--debug: other@place
author--debug: A. N. Other <other@place>
author--debug: User Name <user@hostname>
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branches:
branches:
branches:
branches:
branches:
branches:
branches:
branches:
branches:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
date: 1577872860.00
date: 1000000.00
date: 1500001.00
date: 1500000.00
date: 1400000.00
date: 1300000.00
date: 1200000.00
date: 1100000.00
date: 1000000.00
date--verbose: 1577872860.00
date--verbose: 1000000.00
date--verbose: 1500001.00
date--verbose: 1500000.00
date--verbose: 1400000.00
date--verbose: 1300000.00
date--verbose: 1200000.00
date--verbose: 1100000.00
date--verbose: 1000000.00
date--debug: 1577872860.00
date--debug: 1000000.00
date--debug: 1500001.00
date--debug: 1500000.00
date--debug: 1400000.00
date--debug: 1300000.00
date--debug: 1200000.00
date--debug: 1100000.00
date--debug: 1000000.00
desc: third
desc: second
desc: merge
desc: new head
desc: new branch
desc: no user, no domain
desc: no person
desc: other 1
other 2
other 3
desc: line 1
line 2
desc--verbose: third
desc--verbose: second
desc--verbose: merge
desc--verbose: new head
desc--verbose: new branch
desc--verbose: no user, no domain
desc--verbose: no person
desc--verbose: other 1
other 2
other 3
desc--verbose: line 1
line 2
desc--debug: third
desc--debug: second
desc--debug: merge
desc--debug: new head
desc--debug: new branch
desc--debug: no user, no domain
desc--debug: no person
desc--debug: other 1
other 2
other 3
desc--debug: line 1
line 2
file_adds: fourth third
file_adds: second
file_adds:
file_adds: d
file_adds:
file_adds:
file_adds: c
file_adds: b
file_adds: a
file_adds--verbose: fourth third
file_adds--verbose: second
file_adds--verbose:
file_adds--verbose: d
file_adds--verbose:
file_adds--verbose:
file_adds--verbose: c
file_adds--verbose: b
file_adds--verbose: a
file_adds--debug: fourth third
file_adds--debug: second
file_adds--debug:
file_adds--debug: d
file_adds--debug:
file_adds--debug:
file_adds--debug: c
file_adds--debug: b
file_adds--debug: a
file_dels: second
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels--verbose: second
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--debug: second
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_mods:
file_mods:
file_mods:
file_mods:
file_mods:
file_mods: c
file_mods:
file_mods:
file_mods:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose: c
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_mods--debug: c
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_copies: fourth (second)
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies--verbose: fourth (second)
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--debug: fourth (second)
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
files: fourth second third
files: second
files:
files: d
files:
files: c
files: c
files: b
files: a
files--verbose: fourth second third
files--verbose: second
files--verbose:
files--verbose: d
files--verbose:
files--verbose: c
files--verbose: c
files--verbose: b
files--verbose: a
files--debug: fourth second third
files--debug: second
files--debug:
files--debug: d
files--debug:
files--debug: c
files--debug: c
files--debug: b
files--debug: a
manifest: 102f85d65468
manifest: e3aa144e25d9
manifest: 4dc3def4f9b4
manifest: 4dc3def4f9b4
manifest: cb5a1327723b
manifest: cb5a1327723b
manifest: 6e0e82995c35
manifest: 4e8d705b1e53
manifest: a0c8bcbbb45c
manifest--verbose: 102f85d65468
manifest--verbose: e3aa144e25d9
manifest--verbose: 4dc3def4f9b4
manifest--verbose: 4dc3def4f9b4
manifest--verbose: cb5a1327723b
manifest--verbose: cb5a1327723b
manifest--verbose: 6e0e82995c35
manifest--verbose: 4e8d705b1e53
manifest--verbose: a0c8bcbbb45c
manifest--debug: 102f85d6546830d0894e5420cdddaa12fe270c02
manifest--debug: e3aa144e25d914ea34006bd7b3c266b7eb283c61
manifest--debug: 4dc3def4f9b4c6e8de820f6ee74737f91e96a216
manifest--debug: 4dc3def4f9b4c6e8de820f6ee74737f91e96a216
manifest--debug: cb5a1327723bada42f117e4c55a303246eaf9ccc
manifest--debug: cb5a1327723bada42f117e4c55a303246eaf9ccc
manifest--debug: 6e0e82995c35d0d57a52aca8da4e56139e06b4b1
manifest--debug: 4e8d705b1e53e3f9375e0e60dc7b525d8211fe55
manifest--debug: a0c8bcbbb45c63b90b70ad007bf38961f64f2af0
node: 209edb6a18483c1434e4006bca4c2b1ee5e7090a
node: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
node: f7e5795620e78993ad76680c4306bb2da83907b3
node: 13207e5a10d9fd28ec424934298e176197f2c67f
node: 07fa1db1064879a32157227401eb44b322ae53ce
node: 10e46f2dcbf4823578cf180f33ecf0b957964c47
node: 97054abb4ab824450e9164180baf491ae0078465
node: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
node: 1e4e1b8f71e05681d422154f5421e385fec3454f
node--verbose: 209edb6a18483c1434e4006bca4c2b1ee5e7090a
node--verbose: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
node--verbose: f7e5795620e78993ad76680c4306bb2da83907b3
node--verbose: 13207e5a10d9fd28ec424934298e176197f2c67f
node--verbose: 07fa1db1064879a32157227401eb44b322ae53ce
node--verbose: 10e46f2dcbf4823578cf180f33ecf0b957964c47
node--verbose: 97054abb4ab824450e9164180baf491ae0078465
node--verbose: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
node--verbose: 1e4e1b8f71e05681d422154f5421e385fec3454f
node--debug: 209edb6a18483c1434e4006bca4c2b1ee5e7090a
node--debug: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
node--debug: f7e5795620e78993ad76680c4306bb2da83907b3
node--debug: 13207e5a10d9fd28ec424934298e176197f2c67f
node--debug: 07fa1db1064879a32157227401eb44b322ae53ce
node--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
node--debug: 97054abb4ab824450e9164180baf491ae0078465
node--debug: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
node--debug: 1e4e1b8f71e05681d422154f5421e385fec3454f
parents: 88058a185da2
parents: 000000000000
parents: 13207e5a10d9 07fa1db10648
parents: 10e46f2dcbf4
parents: 10e46f2dcbf4
parents: 97054abb4ab8
parents: b608e9d1a3f0
parents: 1e4e1b8f71e0
parents: 000000000000
parents--verbose: 88058a185da2
parents--verbose: 000000000000
parents--verbose: 13207e5a10d9 07fa1db10648
parents--verbose: 10e46f2dcbf4
parents--verbose: 10e46f2dcbf4
parents--verbose: 97054abb4ab8
parents--verbose: b608e9d1a3f0
parents--verbose: 1e4e1b8f71e0
parents--verbose: 000000000000
parents--debug: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
parents--debug: 0000000000000000000000000000000000000000
parents--debug: 13207e5a10d9fd28ec424934298e176197f2c67f 07fa1db1064879a32157227401eb44b322ae53ce
parents--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
parents--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
parents--debug: 97054abb4ab824450e9164180baf491ae0078465
parents--debug: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
parents--debug: 1e4e1b8f71e05681d422154f5421e385fec3454f
parents--debug: 0000000000000000000000000000000000000000
rev: 8
rev: 7
rev: 6
rev: 5
rev: 4
rev: 3
rev: 2
rev: 1
rev: 0
rev--verbose: 8
rev--verbose: 7
rev--verbose: 6
rev--verbose: 5
rev--verbose: 4
rev--verbose: 3
rev--verbose: 2
rev--verbose: 1
rev--verbose: 0
rev--debug: 8
rev--debug: 7
rev--debug: 6
rev--debug: 5
rev--debug: 4
rev--debug: 3
rev--debug: 2
rev--debug: 1
rev--debug: 0
diffstat: 3: +2/-1
diffstat: 1: +1/-0
diffstat: 0: +0/-0
diffstat: 1: +1/-0
diffstat: 0: +0/-0
diffstat: 1: +1/-0
diffstat: 1: +4/-0
diffstat: 1: +2/-0
diffstat: 1: +1/-0
diffstat--verbose: 3: +2/-1
diffstat--verbose: 1: +1/-0
diffstat--verbose: 0: +0/-0
diffstat--verbose: 1: +1/-0
diffstat--verbose: 0: +0/-0
diffstat--verbose: 1: +1/-0
diffstat--verbose: 1: +4/-0
diffstat--verbose: 1: +2/-0
diffstat--verbose: 1: +1/-0
diffstat--debug: 3: +2/-1
diffstat--debug: 1: +1/-0
diffstat--debug: 0: +0/-0
diffstat--debug: 1: +1/-0
diffstat--debug: 0: +0/-0
diffstat--debug: 1: +1/-0
diffstat--debug: 1: +4/-0
diffstat--debug: 1: +2/-0
diffstat--debug: 1: +1/-0
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
p1rev: 7
p1rev: -1
p1rev: 5
p1rev: 3
p1rev: 3
p1rev: 2
p1rev: 1
p1rev: 0
p1rev: -1
p1rev--verbose: 7
p1rev--verbose: -1
p1rev--verbose: 5
p1rev--verbose: 3
p1rev--verbose: 3
p1rev--verbose: 2
p1rev--verbose: 1
p1rev--verbose: 0
p1rev--verbose: -1
p1rev--debug: 7
p1rev--debug: -1
p1rev--debug: 5
p1rev--debug: 3
p1rev--debug: 3
p1rev--debug: 2
p1rev--debug: 1
p1rev--debug: 0
p1rev--debug: -1
p2rev: -1
p2rev: -1
p2rev: 4
p2rev: -1
p2rev: -1
p2rev: -1
p2rev: -1
p2rev: -1
p2rev: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: 4
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: 4
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p1node: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
p1node: 0000000000000000000000000000000000000000
p1node: 13207e5a10d9fd28ec424934298e176197f2c67f
p1node: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node: 97054abb4ab824450e9164180baf491ae0078465
p1node: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
p1node: 1e4e1b8f71e05681d422154f5421e385fec3454f
p1node: 0000000000000000000000000000000000000000
p1node--verbose: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
p1node--verbose: 0000000000000000000000000000000000000000
p1node--verbose: 13207e5a10d9fd28ec424934298e176197f2c67f
p1node--verbose: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--verbose: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--verbose: 97054abb4ab824450e9164180baf491ae0078465
p1node--verbose: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
p1node--verbose: 1e4e1b8f71e05681d422154f5421e385fec3454f
p1node--verbose: 0000000000000000000000000000000000000000
p1node--debug: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
p1node--debug: 0000000000000000000000000000000000000000
p1node--debug: 13207e5a10d9fd28ec424934298e176197f2c67f
p1node--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--debug: 97054abb4ab824450e9164180baf491ae0078465
p1node--debug: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
p1node--debug: 1e4e1b8f71e05681d422154f5421e385fec3454f
p1node--debug: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 07fa1db1064879a32157227401eb44b322ae53ce
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 07fa1db1064879a32157227401eb44b322ae53ce
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 07fa1db1064879a32157227401eb44b322ae53ce
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000""",
)
# Filters work:
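# Filters are applied to a keyword with '|' and transform its value
# (e.g. domain, person, user, and the various date formats below).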
sh % "hg log --template '{author|domain}\\n'" == r"""
hostname
place
place
hostname"""
sh % "hg log --template '{author|person}\\n'" == r"""
test
User Name
person
person
person
person
other
A. N. Other
User Name"""
sh % "hg log --template '{author|user}\\n'" == r"""
test
user
person
person
person
person
other
other
user"""
sh % "hg log --template '{date|date}\\n'" == r"""
Wed Jan 01 10:01:00 2020 +0000
Mon Jan 12 13:46:40 1970 +0000
Sun Jan 18 08:40:01 1970 +0000
Sun Jan 18 08:40:00 1970 +0000
Sat Jan 17 04:53:20 1970 +0000
Fri Jan 16 01:06:40 1970 +0000
Wed Jan 14 21:20:00 1970 +0000
Tue Jan 13 17:33:20 1970 +0000
Mon Jan 12 13:46:40 1970 +0000"""
sh % "hg log --template '{date|isodate}\\n'" == r"""
2020-01-01 10:01 +0000
1970-01-12 13:46 +0000
1970-01-18 08:40 +0000
1970-01-18 08:40 +0000
1970-01-17 04:53 +0000
1970-01-16 01:06 +0000
1970-01-14 21:20 +0000
1970-01-13 17:33 +0000
1970-01-12 13:46 +0000"""
sh % "hg log --template '{date|isodatesec}\\n'" == r"""
2020-01-01 10:01:00 +0000
1970-01-12 13:46:40 +0000
1970-01-18 08:40:01 +0000
1970-01-18 08:40:00 +0000
1970-01-17 04:53:20 +0000
1970-01-16 01:06:40 +0000
1970-01-14 21:20:00 +0000
1970-01-13 17:33:20 +0000
1970-01-12 13:46:40 +0000"""
sh % "hg log --template '{date|rfc822date}\\n'" == r"""
Wed, 01 Jan 2020 10:01:00 +0000
Mon, 12 Jan 1970 13:46:40 +0000
Sun, 18 Jan 1970 08:40:01 +0000
Sun, 18 Jan 1970 08:40:00 +0000
Sat, 17 Jan 1970 04:53:20 +0000
Fri, 16 Jan 1970 01:06:40 +0000
Wed, 14 Jan 1970 21:20:00 +0000
Tue, 13 Jan 1970 17:33:20 +0000
Mon, 12 Jan 1970 13:46:40 +0000"""
sh % "hg log --template '{desc|firstline}\\n'" == r"""
third
second
merge
new head
new branch
no user, no domain
no person
other 1
line 1"""
sh % "hg log --template '{node|short}\\n'" == r"""
209edb6a1848
88058a185da2
f7e5795620e7
13207e5a10d9
07fa1db10648
10e46f2dcbf4
97054abb4ab8
b608e9d1a3f0
1e4e1b8f71e0"""
sh % "hg log --template '<changeset author=\"{author|xmlescape}\"/>\n'" == r"""
<changeset author="test"/>
<changeset author="User Name <user@hostname>"/>
<changeset author="person"/>
<changeset author="person"/>
<changeset author="person"/>
<changeset author="person"/>
<changeset author="other@place"/>
<changeset author="A. N. Other <other@place>"/>
<changeset author="User Name <user@hostname>"/>"""
sh % "hg log --template '{rev}: {children}\\n'" == r"""
8: (trailing space)
7: 209edb6a1848
6: (trailing space)
5: f7e5795620e7
4: f7e5795620e7
3: 07fa1db10648 13207e5a10d9
2: 10e46f2dcbf4
1: 97054abb4ab8
0: b608e9d1a3f0"""
# Formatnode filter works:
sh % "hg -q log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e0"
sh % "hg log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e0"
sh % "hg -v log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e0"
sh % "hg --debug log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e05681d422154f5421e385fec3454f"
# Age filter:
sh % "hg init unstable-hash"
sh % "cd unstable-hash"
sh % "hg log --template '{date|age}\\n' '||' exit 1" > "/dev/null"
n = datetime.datetime.now() + datetime.timedelta(366 * 7)
s = "%d-%d-%d 00:00" % (n.year, n.month, n.day)
open("a", "wb").write(pycompat.encodeutf8(s))
sh % "hg add a"
sh % ("hg commit -m future -d '%s UTC'" % s)
sh % "hg log -l1 --template '{date|age}\\n'" == "7 years from now"
sh % "cd .."
# Add a dummy commit to make up for the instability of the above:
sh % "echo a" > "a"
sh % "hg add a"
sh % "hg ci -m future"
# Count filter:
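# The count filter returns the length of a string (e.g. 40 for a full node hash,
# 12 for a short one) or the number of entries in a list or revset.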
sh % "hg log -l1 --template '{node|count} {node|short|count}\\n'" == "40 12"
sh % 'hg log -l1 --template \'{revset("null^")|count} {revset(".")|count} {revset("0::3")|count}\\n\'' == "0 1 4"
sh % "hg log -G --template '{rev}: children: {children|count}, file_adds: {file_adds|count}, ancestors: {revset(\"ancestors(%s)\", rev)|count}'" == r"""
@ 9: children: 0, file_adds: 1, ancestors: 3
│
o 8: children: 1, file_adds: 2, ancestors: 2
│
o 7: children: 1, file_adds: 1, ancestors: 1
o 6: children: 0, file_adds: 0, ancestors: 7
├─╮
│ o 5: children: 1, file_adds: 1, ancestors: 5
│ │
o │ 4: children: 1, file_adds: 0, ancestors: 5
├─╯
o 3: children: 2, file_adds: 0, ancestors: 4
│
o 2: children: 1, file_adds: 1, ancestors: 3
│
o 1: children: 1, file_adds: 1, ancestors: 2
│
o 0: children: 1, file_adds: 1, ancestors: 1"""
# Upper/lower filters:
sh % "hg log -r0 --template '{author|upper}\\n'" == "USER NAME <USER@HOSTNAME>"
sh % "hg log -r0 --template '{author|lower}\\n'" == "user name <user@hostname>"
sh % "hg log -r0 --template '{date|upper}\\n'" == r"""
abort: template filter 'upper' is not compatible with keyword 'date'
[255]"""
# Add a commit that does all possible modifications at once
sh % "echo modify" >> "third"
sh % "touch b"
sh % "hg add b"
sh % "hg mv fourth fifth"
sh % "hg rm a"
sh % "hg ci -m 'Modify, add, remove, rename'"
# Check the status template
(
sh % "cat"
<< r"""
[extensions]
color=
"""
>> "$HGRCPATH"
)
sh % "hg log -T status -r 10" == r"""
commit: bc9dfec3b3bc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: Modify, add, remove, rename
files:
M third
A b
A fifth
R a
R fourth"""
sh % "hg log -T status -C -r 10" == r"""
commit: bc9dfec3b3bc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: Modify, add, remove, rename
files:
M third
A b
A fifth
fourth
R a
R fourth"""
sh % "hg log -T status -C -r 10 -v" == r"""
commit: bc9dfec3b3bc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
description:
Modify, add, remove, rename
files:
M third
A b
A fifth
fourth
R a
R fourth"""
sh % "hg log -T status -C -r 10 --debug" == r"""
commit: bc9dfec3b3bcc43c41a22000f3226b0c1085d5c1
phase: draft
manifest: 1685af69a14aa2346cfb01cf0e7f50ef176128b4
user: test
date: Thu Jan 01 00:00:00 1970 +0000
extra: branch=default
description:
Modify, add, remove, rename
files:
M third
A b
A fifth
fourth
R a
R fourth"""
sh % "hg log -T status -C -r 10 --quiet" == "bc9dfec3b3bc"
sh % "hg '--color=debug' log -T status -r 10" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bc]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[log.summary|summary: Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bc]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[log.summary|summary: Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.copied| fourth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10 -v" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bc]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[ui.note log.description|description:]
[ui.note log.description|Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.copied| fourth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10 --debug" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bcc43c41a22000f3226b0c1085d5c1]
[log.phase|phase: draft]
[ui.debug log.manifest|manifest: 1685af69a14aa2346cfb01cf0e7f50ef176128b4]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.copied| fourth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10 --quiet" == "[log.node|bc9dfec3b3bc]"
# Check the bisect template
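# Mark revision 1 good and revision 3 bad first, so the bisect template has
# states (good/bad/untested, implicit or explicit) to display.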
sh % "hg bisect -g 1"
sh % "hg bisect -b 3 --noupdate" == "Testing changeset 97054abb4ab8 (2 changesets remaining, ~1 tests)"
sh % "hg log -T bisect -r '0:4'" == r"""
commit: 1e4e1b8f71e0
bisect: good (implicit)
user: User Name <user@hostname>
date: Mon Jan 12 13:46:40 1970 +0000
summary: line 1
commit: b608e9d1a3f0
bisect: good
user: A. N. Other <other@place>
date: Tue Jan 13 17:33:20 1970 +0000
summary: other 1
commit: 97054abb4ab8
bisect: untested
user: other@place
date: Wed Jan 14 21:20:00 1970 +0000
summary: no person
commit: 10e46f2dcbf4
bisect: bad
user: person
date: Fri Jan 16 01:06:40 1970 +0000
summary: no user, no domain
commit: 07fa1db10648
bisect: bad (implicit)
bookmark: foo
user: person
date: Sat Jan 17 04:53:20 1970 +0000
summary: new branch"""
sh % "hg log --debug -T bisect -r '0:4'" == r"""
commit: 1e4e1b8f71e05681d422154f5421e385fec3454f
bisect: good (implicit)
phase: public
manifest: a0c8bcbbb45c63b90b70ad007bf38961f64f2af0
user: User Name <user@hostname>
date: Mon Jan 12 13:46:40 1970 +0000
files+: a
extra: branch=default
description:
line 1
line 2
commit: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
bisect: good
phase: public
manifest: 4e8d705b1e53e3f9375e0e60dc7b525d8211fe55
user: A. N. Other <other@place>
date: Tue Jan 13 17:33:20 1970 +0000
files+: b
extra: branch=default
description:
other 1
other 2
other 3
commit: 97054abb4ab824450e9164180baf491ae0078465
bisect: untested
phase: public
manifest: 6e0e82995c35d0d57a52aca8da4e56139e06b4b1
user: other@place
date: Wed Jan 14 21:20:00 1970 +0000
files+: c
extra: branch=default
description:
no person
commit: 10e46f2dcbf4823578cf180f33ecf0b957964c47
bisect: bad
phase: public
manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc
user: person
date: Fri Jan 16 01:06:40 1970 +0000
files: c
extra: branch=default
description:
no user, no domain
commit: 07fa1db1064879a32157227401eb44b322ae53ce
bisect: bad (implicit)
bookmark: foo
phase: draft
manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc
user: person
date: Sat Jan 17 04:53:20 1970 +0000
extra: branch=default
description:
new branch"""
sh % "hg log -v -T bisect -r '0:4'" == r"""
commit: 1e4e1b8f71e0
bisect: good (implicit)
user: User Name <user@hostname>
date: Mon Jan 12 13:46:40 1970 +0000
files: a
description:
line 1
line 2
commit: b608e9d1a3f0
bisect: good
user: A. N. Other <other@place>
date: Tue Jan 13 17:33:20 1970 +0000
files: b
description:
other 1
other 2
other 3
commit: 97054abb4ab8
bisect: untested
user: other@place
date: Wed Jan 14 21:20:00 1970 +0000
files: c
description:
no person
commit: 10e46f2dcbf4
bisect: bad
user: person
date: Fri Jan 16 01:06:40 1970 +0000
files: c
description:
no user, no domain
commit: 07fa1db10648
bisect: bad (implicit)
bookmark: foo
user: person
date: Sat Jan 17 04:53:20 1970 +0000
description:
new branch"""
sh % "hg '--color=debug' log -T bisect -r '0:4'" == r"""
[log.changeset changeset.public|commit: 1e4e1b8f71e0]
[log.bisect bisect.good|bisect: good (implicit)]
[log.user|user: User Name <user@hostname>]
[log.date|date: Mon Jan 12 13:46:40 1970 +0000]
[log.summary|summary: line 1]
[log.changeset changeset.public|commit: b608e9d1a3f0]
[log.bisect bisect.good|bisect: good]
[log.user|user: A. N. Other <other@place>]
[log.date|date: Tue Jan 13 17:33:20 1970 +0000]
[log.summary|summary: other 1]
[log.changeset changeset.public|commit: 97054abb4ab8]
[log.bisect bisect.untested|bisect: untested]
[log.user|user: other@place]
[log.date|date: Wed Jan 14 21:20:00 1970 +0000]
[log.summary|summary: no person]
[log.changeset changeset.public|commit: 10e46f2dcbf4]
[log.bisect bisect.bad|bisect: bad]
[log.user|user: person]
[log.date|date: Fri Jan 16 01:06:40 1970 +0000]
[log.summary|summary: no user, no domain]
[log.changeset changeset.draft|commit: 07fa1db10648]
[log.bisect bisect.bad|bisect: bad (implicit)]
[log.bookmark|bookmark: foo]
[log.user|user: person]
[log.date|date: Sat Jan 17 04:53:20 1970 +0000]
[log.summary|summary: new branch]"""
sh % "hg '--color=debug' log --debug -T bisect -r '0:4'" == r"""
[log.changeset changeset.public|commit: 1e4e1b8f71e05681d422154f5421e385fec3454f]
[log.bisect bisect.good|bisect: good (implicit)]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: a0c8bcbbb45c63b90b70ad007bf38961f64f2af0]
[log.user|user: User Name <user@hostname>]
[log.date|date: Mon Jan 12 13:46:40 1970 +0000]
[ui.debug log.files|files+: a]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|line 1
line 2]
[log.changeset changeset.public|commit: b608e9d1a3f0273ccf70fb85fd6866b3482bf965]
[log.bisect bisect.good|bisect: good]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: 4e8d705b1e53e3f9375e0e60dc7b525d8211fe55]
[log.user|user: A. N. Other <other@place>]
[log.date|date: Tue Jan 13 17:33:20 1970 +0000]
[ui.debug log.files|files+: b]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|other 1
other 2
other 3]
[log.changeset changeset.public|commit: 97054abb4ab824450e9164180baf491ae0078465]
[log.bisect bisect.untested|bisect: untested]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: 6e0e82995c35d0d57a52aca8da4e56139e06b4b1]
[log.user|user: other@place]
[log.date|date: Wed Jan 14 21:20:00 1970 +0000]
[ui.debug log.files|files+: c]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|no person]
[log.changeset changeset.public|commit: 10e46f2dcbf4823578cf180f33ecf0b957964c47]
[log.bisect bisect.bad|bisect: bad]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc]
[log.user|user: person]
[log.date|date: Fri Jan 16 01:06:40 1970 +0000]
[ui.debug log.files|files: c]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|no user, no domain]
[log.changeset changeset.draft|commit: 07fa1db1064879a32157227401eb44b322ae53ce]
[log.bisect bisect.bad|bisect: bad (implicit)]
[log.bookmark|bookmark: foo]
[log.phase|phase: draft]
[ui.debug log.manifest|manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc]
[log.user|user: person]
[log.date|date: Sat Jan 17 04:53:20 1970 +0000]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|new branch]"""
sh % "hg '--color=debug' log -v -T bisect -r '0:4'" == r"""
[log.changeset changeset.public|commit: 1e4e1b8f71e0]
[log.bisect bisect.good|bisect: good (implicit)]
[log.user|user: User Name <user@hostname>]
[log.date|date: Mon Jan 12 13:46:40 1970 +0000]
[ui.note log.files|files: a]
[ui.note log.description|description:]
[ui.note log.description|line 1
line 2]
[log.changeset changeset.public|commit: b608e9d1a3f0]
[log.bisect bisect.good|bisect: good]
[log.user|user: A. N. Other <other@place>]
[log.date|date: Tue Jan 13 17:33:20 1970 +0000]
[ui.note log.files|files: b]
[ui.note log.description|description:]
[ui.note log.description|other 1
other 2
other 3]
[log.changeset changeset.public|commit: 97054abb4ab8]
[log.bisect bisect.untested|bisect: untested]
[log.user|user: other@place]
[log.date|date: Wed Jan 14 21:20:00 1970 +0000]
[ui.note log.files|files: c]
[ui.note log.description|description:]
[ui.note log.description|no person]
[log.changeset changeset.public|commit: 10e46f2dcbf4]
[log.bisect bisect.bad|bisect: bad]
[log.user|user: person]
[log.date|date: Fri Jan 16 01:06:40 1970 +0000]
[ui.note log.files|files: c]
[ui.note log.description|description:]
[ui.note log.description|no user, no domain]
[log.changeset changeset.draft|commit: 07fa1db10648]
[log.bisect bisect.bad|bisect: bad (implicit)]
[log.bookmark|bookmark: foo]
[log.user|user: person]
[log.date|date: Sat Jan 17 04:53:20 1970 +0000]
[ui.note log.description|description:]
[ui.note log.description|new branch]"""
sh % "hg bisect --reset"
# Error on syntax:
sh % "echo 'x = \"f'" >> "t"
sh % "hg log" == r"""
hg: parse error at t:3: unmatched quotes
[255]"""
sh % "hg log -T '{date'" == r"""
hg: parse error at 1: unterminated template expansion
({date
^ here)
[255]"""
# Behind the scenes, this will throw a TypeError
sh % "hg log -l 3 --template '{date|obfuscate}\\n'" == r"""
abort: template filter 'obfuscate' is not compatible with keyword 'date'
[255]"""
# Behind the scenes, this will throw a ValueError
sh % "hg log -l 3 --template 'line: {desc|shortdate}\\n'" == r"""
abort: template filter 'shortdate' is not compatible with keyword 'desc'
[255]"""
# Behind the scenes, this will throw an AttributeError
sh % "hg log -l 3 --template 'line: {date|escape}\\n'" == r"""
abort: template filter 'escape' is not compatible with keyword 'date'
[255]"""
sh % "hg log -l 3 --template 'line: {extras|localdate}\\n'" == r"""
hg: parse error: localdate expects a date information
[255]"""
# Behind the scenes, this will throw ValueError
sh % "hg tip --template '{author|email|date}\\n'" == r"""
hg: parse error: date expects a date information
[255]"""
sh % "hg tip -T '{author|email|shortdate}\\n'" == r"""
abort: template filter 'shortdate' is not compatible with keyword 'author'
[255]"""
sh % "hg tip -T '{get(extras, \"branch\")|shortdate}\\n'" == r"""
abort: incompatible use of template filter 'shortdate'
[255]"""
# Error in nested template:
sh % "hg log -T '{\"date'" == r"""
hg: parse error at 2: unterminated string
({"date
^ here)
[255]"""
sh % "hg log -T '{\"foo{date|?}\"}'" == r"""
hg: parse error at 11: syntax error
({"foo{date|?}"}
^ here)
[255]"""
# An error is thrown if a template function doesn't exist:
sh % "hg tip --template '{foo()}\\n'" == r"""
hg: parse error: unknown function 'foo'
[255]"""
# Pass the generator object created by a template function to a filter:
sh % "hg log -l 1 --template '{if(author, author)|user}\\n'" == "test"
# Test index keyword:
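# index is the position of the current item within a '%' mapping; at the top level
# of the template it reflects the changeset's position in the log output.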
sh % "hg log -l 2 -T '{index + 10}{files % \" {index}:{file}\"}\\n'" == r"""
10 0:a 1:b 2:fifth 3:fourth 4:third
11 0:a"""
# Test diff function:
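# diff() with no arguments renders the revision's full diff; judging from the cases
# below, the first argument acts as an include pattern and the second as an exclude pattern.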
sh % "hg diff -c 8" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 second
--- a/second Mon Jan 12 13:46:40 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
"""
+ (" -🥈\udce2(\udca1" if is_py3 else " -🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 third
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/third Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
+third"""
)
sh % "hg log -r 8 -T '{diff()}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 second
--- a/second Mon Jan 12 13:46:40 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
"""
+ (" -🥈\udce2(\udca1" if is_py3 else " -🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 third
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/third Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
+third"""
)
sh % "hg log -r 8 -T '{diff('\\''glob:f*'\\'')}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
)
sh % "hg log -r 8 -T '{diff('\\'''\\'', '\\''glob:f*'\\'')}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 second
--- a/second Mon Jan 12 13:46:40 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
"""
+ (" -🥈\udce2(\udca1" if is_py3 else " -🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 third
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/third Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
+third"""
)
sh % "hg log -r 8 -T '{diff('\\''FOURTH'\\''|lower)}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
)
sh % "hg log -r 8 -T '{diff()|json}'" == '"diff -r 88058a185da2 -r 209edb6a1848 fourth\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/fourth\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+\\ud83e\\udd48\\udce2(\\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 second\\n--- a/second\\tMon Jan 12 13:46:40 1970 +0000\\n+++ /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n@@ -1,1 +0,0 @@\\n-\\ud83e\\udd48\\udce2(\\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 third\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/third\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+third\\n"'
# ui verbosity:
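# The verbosity keyword reflects the current ui verbosity: empty by default,
# or 'debug', 'quiet', 'verbose' when the matching flag is given.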
sh % "hg log -l1 -T '{verbosity}\\n'"
sh % "hg log -l1 -T '{verbosity}\\n' --debug" == "debug"
sh % "hg log -l1 -T '{verbosity}\\n' --quiet" == "quiet"
sh % "hg log -l1 -T '{verbosity}\\n' --verbose" == "verbose"
sh % "cd .."
# latesttag:
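# Build a small repository with two heads and a merge; the template tests below run against it.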
sh % "hg init latesttag"
sh % "cd latesttag"
sh % "echo a" > "file"
sh % "hg ci -Am a -d '0 0'" == "adding file"
sh % "echo b" >> "file"
sh % "hg ci -m b -d '1 0'"
sh % "echo c" >> "head1"
sh % "hg ci -Am h1c -d '2 0'" == "adding head1"
sh % "hg update -q 1"
sh % "echo d" >> "head2"
sh % "hg ci -Am h2d -d '3 0'" == "adding head2"
sh % "echo e" >> "head2"
sh % "hg ci -m h2e -d '4 0'"
sh % "hg merge -q"
sh % "hg ci -m merge -d '5 -3600'"
sh % "cd .."
# Style path expansion: issue1948 - ui.style option doesn't work on OSX
# if it is a relative path
sh % "mkdir -p $TESTTMP/home/styles"
sh % "cat" << r"""
changeset = 'test {rev}:{node|short}\n'
""" > "$TESTTMP/home/styles/teststyle"
sh % "cat" << r"""
[ui]
style = $TESTTMP/home/styles/teststyle
""" > "latesttag/.hg/hgrc"
sh % "hg -R latesttag tip" == "test 5:888bdaa97ddd"
# Test recursive showlist template (issue1989):
sh % "cat" << r"""
changeset = '{file_mods}{manifest}{extras}'
file_mod = 'M|{author|person}\n'
manifest = '{rev},{author}\n'
extra = '{key}: {author}\n'
""" > "style1989"
sh % "hg -R latesttag log -r tip^ '--style=style1989'" == r"""
M|test
4,test
branch: test"""
# Test new-style inline templating:
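# The '%' operator maps a template over each member of a list-valued keyword;
# applying it to a non-iterable keyword such as rev is a parse error.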
sh % "hg log -R latesttag -r tip^ --template 'modified files: {file_mods % \" {file}\\n\"}\\n'" == "modified files: head2"
sh % "hg log -R latesttag -r tip^ -T '{rev % \"a\"}\\n'" == r"""
hg: parse error: keyword 'rev' is not iterable
[255]"""
sh % 'hg log -R latesttag -r tip^ -T \'{get(extras, "unknown") % "a"}\\n\'' == r"""
hg: parse error: None is not iterable
[255]"""
# Test new-style inline templating of non-list/dict type:
sh % "hg log -R latesttag -r tip -T '{manifest}\\n'" == "ed2d5d416a51"
sh % "hg log -R latesttag -r tip -T 'string length: {manifest|count}\\n'" == "string length: 12"
sh % "hg log -R latesttag -r tip -T '{manifest % \"{rev}:{node}\"}\\n'" == "5:ed2d5d416a513f3f19ab4cd41c793dcd8272a497"
sh % 'hg log -R latesttag -r tip -T \'{get(extras, "branch") % "{key}: {value}\\n"}\'' == "branch: default"
sh % 'hg log -R latesttag -r tip -T \'{get(extras, "unknown") % "{key}\\n"}\'' == r"""
hg: parse error: None is not iterable
[255]"""
sh % "hg log -R latesttag -r tip -T '{min(extras) % \"{key}: {value}\\n\"}'" == "branch: default"
sh % 'hg log -R latesttag -l1 -T \'{min(revset("0:5")) % "{rev}:{node|short}\\n"}\'' == "0:ce3cec86e6c2"
sh % 'hg log -R latesttag -l1 -T \'{max(revset("0:5")) % "{rev}:{node|short}\\n"}\'' == "5:888bdaa97ddd"
# Test manifest/get() can be join()-ed as before, though it's silly:
sh % "hg log -R latesttag -r tip -T '{join(manifest, \"\")}\\n'" == "ed2d5d416a51"
sh % 'hg log -R latesttag -r tip -T \'{join(get(extras, "branch"), "")}\\n\'' == "default"
# Test min/max of integers
sh % "hg log -R latesttag -l1 -T '{min(revset(\"4:5\"))}\\n'" == "4"
sh % "hg log -R latesttag -l1 -T '{max(revset(\"4:5\"))}\\n'" == "5"
# Test dot operator precedence:
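# The '.' operator binds more tightly than '|', so '{manifest.node|short}'
# parses as '{(manifest.node)|short}'.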
sh % "hg debugtemplate -R latesttag -r0 -v '{manifest.node|short}\\n'" == r"""
(template
(|
(.
(symbol 'manifest')
(symbol 'node'))
(symbol 'short'))
(string '\n'))
89f4071fec70"""
# (the following examples are invalid, but seem natural from a parsing point of view)
sh % "hg debugtemplate -R latesttag -r0 -v '{foo|bar.baz}\\n'" == r"""
(template
(|
(symbol 'foo')
(.
(symbol 'bar')
(symbol 'baz')))
(string '\n'))
hg: parse error: expected a symbol, got '.'
[255]"""
sh % "hg debugtemplate -R latesttag -r0 -v '{foo.bar()}\\n'" == r"""
(template
(.
(symbol 'foo')
(func
(symbol 'bar')
None))
(string '\n'))
hg: parse error: expected a symbol, got 'func'
[255]"""
# Test evaluation of dot operator:
sh % "hg log -R latesttag -l1 -T '{min(revset(\"0:9\")).node}\\n'" == "ce3cec86e6c26bd9bdfc590a6b92abc9680f1796"
sh % "hg log -R latesttag -r0 -T '{extras.branch}\\n'" == "default"
sh % "hg log -R latesttag -l1 -T '{author.invalid}\\n'" == r"""
hg: parse error: keyword 'author' has no member
[255]"""
sh % "hg log -R latesttag -l1 -T '{min(\"abc\").invalid}\\n'" == r"""
hg: parse error: 'a' has no member
[255]"""
# Test the sub function of templating for expansion:
sh % 'hg log -R latesttag -r 5 --template \'{sub("[0-9]", "x", "{rev}")}\\n\'' == "x"
sh % 'hg log -R latesttag -r 5 -T \'{sub("[", "x", rev)}\\n\'' == r"""
hg: parse error: sub got an invalid pattern: [
[255]"""
sh % 'hg log -R latesttag -r 5 -T \'{sub("[0-9]", r"\\1", rev)}\\n\'' == r"""
hg: parse error: sub got an invalid replacement: \1
[255]"""
# Test the strip function with chars specified:
sh % "hg log -R latesttag --template '{desc}\\n'" == r"""
merge
h2e
h2d
h1c
b
a"""
sh % "hg log -R latesttag --template '{strip(desc, \"te\")}\\n'" == r"""
merg
h2
h2d
h1c
b
a"""
# Test date format:
sh % "hg log -R latesttag --template 'date: {date(date, \"%y %m %d %S %z\")}\\n'" == r"""
date: 70 01 01 05 +0100
date: 70 01 01 04 +0000
date: 70 01 01 03 +0000
date: 70 01 01 02 +0000
date: 70 01 01 01 +0000
date: 70 01 01 00 +0000"""
# Test invalid date:
sh % "hg log -R latesttag -T '{date(rev)}\\n'" == r"""
hg: parse error: date expects a date information
[255]"""
# Test integer literal:
sh % "hg debugtemplate -v '{(0)}\\n'" == r"""
(template
(group
(integer '0'))
(string '\n'))
0"""
sh % "hg debugtemplate -v '{(123)}\\n'" == r"""
(template
(group
(integer '123'))
(string '\n'))
123"""
sh % "hg debugtemplate -v '{(-4)}\\n'" == r"""
(template
(group
(negate
(integer '4')))
(string '\n'))
-4"""
sh % "hg debugtemplate '{(-)}\\n'" == r"""
hg: parse error at 3: not a prefix: )
({(-)}\n
^ here)
[255]"""
sh % "hg debugtemplate '{(-a)}\\n'" == r"""
hg: parse error: negation needs an integer argument
[255]"""
# A top-level integer literal is interpreted as a symbol (i.e. a variable name):
sh % "hg debugtemplate -D '1=one' -v '{1}\\n'" == r"""
(template
(integer '1')
(string '\n'))
one"""
sh % "hg debugtemplate -D '1=one' -v '{if(\"t\", \"{1}\")}\\n'" == r"""
(template
(func
(symbol 'if')
(list
(string 't')
(template
(integer '1'))))
(string '\n'))
one"""
sh % "hg debugtemplate -D '1=one' -v '{1|stringify}\\n'" == r"""
(template
(|
(integer '1')
(symbol 'stringify'))
(string '\n'))
one"""
# unless an explicit symbol is expected:
sh % "hg log -Ra -r0 -T '{desc|1}\\n'" == r"""
hg: parse error: expected a symbol, got 'integer'
[255]"""
sh % "hg log -Ra -r0 -T '{1()}\\n'" == r"""
hg: parse error: expected a symbol, got 'integer'
[255]"""
# Test string literal:
sh % "hg debugtemplate -Ra -r0 -v '{\"string with no template fragment\"}\\n'" == r"""
(template
(string 'string with no template fragment')
(string '\n'))
string with no template fragment"""
sh % "hg debugtemplate -Ra -r0 -v '{\"template: {rev}\"}\\n'" == r"""
(template
(template
(string 'template: ')
(symbol 'rev'))
(string '\n'))
template: 0"""
sh % "hg debugtemplate -Ra -r0 -v '{r\"rawstring: {rev}\"}\\n'" == r"""
(template
(string 'rawstring: {rev}')
(string '\n'))
rawstring: {rev}"""
sh % "hg debugtemplate -Ra -r0 -v '{files % r\"rawstring: {file}\"}\\n'" == r"""
(template
(%
(symbol 'files')
(string 'rawstring: {file}'))
(string '\n'))
rawstring: {file}"""
# Test string escaping:
sh % "hg log -R latesttag -r 0 --template '>\\n<>\\\\n<{if(rev, \"[>\\n<>\\\\n<]\")}>\\n<>\\\\n<\\n'" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
sh % "hg log -R latesttag -r 0 --config 'ui.logtemplate=>\\n<>\\\\n<{if(rev, \"[>\\n<>\\\\n<]\")}>\\n<>\\\\n<\\n'" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
sh % "hg log -R latesttag -r 0 -T esc --config 'templates.esc=>\\n<>\\\\n<{if(rev, \"[>\\n<>\\\\n<]\")}>\\n<>\\\\n<\\n'" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
sh % "cat" << r"""
changeset = '>\n<>\\n<{if(rev, "[>\n<>\\n<]")}>\n<>\\n<\n'
""" > "esctmpl"
sh % "hg log -R latesttag -r 0 --style ./esctmpl" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
# Test string escaping of quotes:
sh % 'hg log -Ra -r0 -T \'{"\\""}\\n\'' == '"'
sh % 'hg log -Ra -r0 -T \'{"\\\\\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\\\\\""}\\n\'' == '\\\\\\"'
sh % 'hg log -Ra -r0 -T \'{"\\""}\\n\'' == '"'
sh % 'hg log -Ra -r0 -T \'{"\\\\\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\\\\\""}\\n\'' == '\\\\\\"'
# Test exceptions in quoted templates. A single backslash before a quotation mark is
# stripped before parsing:
sh % "cat" << r"""
changeset = "\" \\" \\\" \\\\" {files % \"{file}\"}\n"
""" > "escquotetmpl"
sh % "cd latesttag"
sh % "hg log -r 2 --style ../escquotetmpl" == '" \\" \\" \\\\" head1'
sh % 'hg log -r 2 -T esc --config \'templates.esc="{\\"valid\\"}\\n"\'' == "valid"
sh % "hg log -r 2 -T esc --config 'templates.esc='\\''{\\'\\''valid\\'\\''}\\n'\\'''" == "valid"
# Test compatibility with 2.9.2-3.4 of escaped quoted strings in nested
# _evalifliteral() templates (issue4733):
sh % 'hg log -r 2 -T \'{if(rev, "\\"{rev}")}\\n\'' == '"2'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"\\\\\\"{rev}\\")}")}\\n\'' == '"2'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"{if(rev, \\\\\\"\\\\\\\\\\\\\\"{rev}\\\\\\")}\\")}")}\\n\'' == '"2'
sh % 'hg log -r 2 -T \'{if(rev, "\\\\\\"")}\\n\'' == '\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"\\\\\\\\\\\\\\"\\")}")}\\n\'' == '\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"{if(rev, \\\\\\"\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"\\\\\\")}\\")}")}\\n\'' == '\\"'
sh % 'hg log -r 2 -T \'{if(rev, r"\\\\\\"")}\\n\'' == '\\\\\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, r\\"\\\\\\\\\\\\\\"\\")}")}\\n\'' == '\\\\\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"{if(rev, r\\\\\\"\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"\\\\\\")}\\")}")}\\n\'' == '\\\\\\"'
# escaped single quotes and errors:
sh % "hg log -r 2 -T '{if(rev, '\\''{if(rev, \\'\\''foo\\'\\'')}'\\'')}\\n'" == "foo"
sh % "hg log -r 2 -T '{if(rev, '\\''{if(rev, r\\'\\''foo\\'\\'')}'\\'')}\\n'" == "foo"
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\")}")}\\n\'' == r"""
hg: parse error at 21: unterminated string
({if(rev, "{if(rev, \")}")}\n
^ here)
[255]"""
sh % 'hg log -r 2 -T \'{if(rev, \\"\\\\"")}\\n\'' == r"""
hg: parse error: trailing \ in string
[255]"""
sh % 'hg log -r 2 -T \'{if(rev, r\\"\\\\"")}\\n\'' == r"""
hg: parse error: trailing \ in string
[255]"""
sh % "cd .."
# Test leading backslashes:
sh % "cd latesttag"
sh % "hg log -r 2 -T '\\{rev} {files % \"\\{file}\"}\\n'" == "{rev} {file}"
sh % "hg log -r 2 -T '\\\\{rev} {files % \"\\\\{file}\"}\\n'" == "\\2 \\head1"
sh % "hg log -r 2 -T '\\\\\\{rev} {files % \"\\\\\\{file}\"}\\n'" == "\\{rev} \\{file}"
sh % "cd .."
# Test leading backslashes in "if" expression (issue4714):
sh % "cd latesttag"
sh % 'hg log -r 2 -T \'{if("1", "\\{rev}")} {if("1", r"\\{rev}")}\\n\'' == "{rev} \\{rev}"
sh % 'hg log -r 2 -T \'{if("1", "\\\\{rev}")} {if("1", r"\\\\{rev}")}\\n\'' == "\\2 \\\\{rev}"
sh % 'hg log -r 2 -T \'{if("1", "\\\\\\{rev}")} {if("1", r"\\\\\\{rev}")}\\n\'' == "\\{rev} \\\\\\{rev}"
sh % "cd .."
# "string-escape"-ed "\x5c\x786e" becomes r"\x6e" (once) or r"n" (twice)
sh % 'hg log -R a -r 0 --template \'{if("1", "\\x5c\\x786e", "NG")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 0 --template \'{if("1", r"\\x5c\\x786e", "NG")}\\n\'' == "\\x5c\\x786e"
sh % 'hg log -R a -r 0 --template \'{if("", "NG", "\\x5c\\x786e")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 0 --template \'{if("", "NG", r"\\x5c\\x786e")}\\n\'' == "\\x5c\\x786e"
sh % 'hg log -R a -r 2 --template \'{ifeq("no perso\\x6e", desc, "\\x5c\\x786e", "NG")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 2 --template \'{ifeq(r"no perso\\x6e", desc, "NG", r"\\x5c\\x786e")}\\n\'' == "\\x5c\\x786e"
sh % 'hg log -R a -r 2 --template \'{ifeq(desc, "no perso\\x6e", "\\x5c\\x786e", "NG")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 2 --template \'{ifeq(desc, r"no perso\\x6e", "NG", r"\\x5c\\x786e")}\\n\'' == "\\x5c\\x786e"
sh % "hg log -R a -r 8 --template '{join(files, \"\\n\")}\\n'" == r"""
fourth
second
third"""
sh % "hg log -R a -r 8 --template '{join(files, r\"\\n\")}\\n'" == "fourth\\nsecond\\nthird"
sh % 'hg log -R a -r 2 --template \'{rstdoc("1st\\n\\n2nd", "htm\\x6c")}\'' == r"""
<p>
1st
</p>
<p>
2nd
</p>"""
sh % 'hg log -R a -r 2 --template \'{rstdoc(r"1st\\n\\n2nd", "html")}\'' == r"""
<p>
1st\n\n2nd
</p>"""
sh % 'hg log -R a -r 2 --template \'{rstdoc("1st\\n\\n2nd", r"htm\\x6c")}\'' == r"""
1st
2nd"""
sh % "hg log -R a -r 2 --template '{strip(desc, \"\\x6e\")}\\n'" == "o perso"
sh % "hg log -R a -r 2 --template '{strip(desc, r\"\\x6e\")}\\n'" == "no person"
sh % 'hg log -R a -r 2 --template \'{strip("no perso\\x6e", "\\x6e")}\\n\'' == "o perso"
sh % 'hg log -R a -r 2 --template \'{strip(r"no perso\\x6e", r"\\x6e")}\\n\'' == "no perso"
sh % 'hg log -R a -r 2 --template \'{sub("\\\\x6e", "\\x2d", desc)}\\n\'' == "-o perso-"
sh % 'hg log -R a -r 2 --template \'{sub(r"\\\\x6e", "-", desc)}\\n\'' == "no person"
sh % pycompat.decodeutf8(
b'hg log -R a -r 2 --template \'{sub("n", "\\x2d", "no perso\\x6e")}\\n\''
) == pycompat.decodeutf8(b"-o perso-")
sh % "hg log -R a -r 8 --template '{files % \"{file}\\n\"}'" == r"""
fourth
second
third"""
# Test string escaping in nested expression:
sh % 'hg log -R a -r 8 --template \'{ifeq(r"\\x6e", if("1", "\\x5c\\x786e"), join(files, "\\x5c\\x786e"))}\\n\'' == "fourth\\x6esecond\\x6ethird"
sh % 'hg log -R a -r 8 --template \'{ifeq(if("1", r"\\x6e"), "\\x5c\\x786e", join(files, "\\x5c\\x786e"))}\\n\'' == "fourth\\x6esecond\\x6ethird"
sh % 'hg log -R a -r 8 --template \'{join(files, ifeq(branch, "default", "\\x5c\\x786e"))}\\n\'' == "fourth\\x6esecond\\x6ethird"
sh % 'hg log -R a -r 8 --template \'{join(files, ifeq(branch, "default", r"\\x5c\\x786e"))}\\n\'' == "fourth\\x5c\\x786esecond\\x5c\\x786ethird"
# Test quotes in nested expression are evaluated just like a $(command)
# substitution in POSIX shells:
sh % 'hg log -R a -r 8 -T \'{"{"{rev}:{node|short}"}"}\\n\'' == "8:209edb6a1848"
sh % 'hg log -R a -r 8 -T \'{"{"\\{{rev}} \\"{node|short}\\""}"}\\n\'' == '{8} "209edb6a1848"'
# Test recursive evaluation:
sh % "hg init r"
sh % "cd r"
sh % "echo a" > "a"
sh % "hg ci -Am '{rev}'" == "adding a"
sh % "hg log -r 0 --template '{if(rev, desc)}\\n'" == "{rev}"
sh % "hg log -r 0 --template '{if(rev, \"{author} {rev}\")}\\n'" == "test 0"
sh % "hg bookmark -q 'text.{rev}'"
sh % "echo aa" >> "aa"
sh % "hg ci -u '{node|short}' -m 'desc to be wrapped desc to be wrapped'"
sh % "hg log -l1 --template '{fill(desc, \"20\", author, bookmarks)}'" == r"""
{node|short}desc to
text.{rev}be wrapped
text.{rev}desc to be
text.{rev}wrapped"""
sh % 'hg log -l1 --template \'{fill(desc, "20", "{node|short}:", "text.{rev}:")}\'' == r"""
ea4c0948489d:desc to
text.1:be wrapped
text.1:desc to be
text.1:wrapped"""
sh % 'hg log -l1 -T \'{fill(desc, date, "", "")}\\n\'' == r"""
hg: parse error: fill expects an integer width
[255]"""
sh % "'COLUMNS=25' hg log -l1 --template '{fill(desc, termwidth, \"{node|short}:\", \"termwidth.{rev}:\")}'" == r"""
ea4c0948489d:desc to be
termwidth.1:wrapped desc
termwidth.1:to be wrapped"""
sh % 'hg log -l 1 --template \'{sub(r"[0-9]", "-", author)}\'' == "{node|short}"
sh % 'hg log -l 1 --template \'{sub(r"[0-9]", "-", "{node|short}")}\'' == "ea-c-------d"
(
sh % "cat"
<< r"""
[extensions]
color=
[color]
mode=ansi
text.{rev} = red
text.1 = green
"""
>> ".hg/hgrc"
)
sh % "hg log '--color=always' -l 1 --template '{label(bookmarks, \"text\\n\")}'" == "\\x1b[0;31mtext\\x1b[0m (esc)"
sh % "hg log '--color=always' -l 1 --template '{label(\"text.{rev}\", \"text\\n\")}'" == "\\x1b[0;32mtext\\x1b[0m (esc)"
# color effect can be specified without quoting:
sh % "hg log '--color=always' -l 1 --template '{label(red, \"text\\n\")}'" == "\\x1b[0;31mtext\\x1b[0m (esc)"
# color effects can be nested (issue5413)
sh % 'hg debugtemplate \'--color=always\' \'{label(red, "red{label(magenta, "ma{label(cyan, "cyan")}{label(yellow, "yellow")}genta")}")}\\n\'' == "\\x1b[0;31mred\\x1b[0;35mma\\x1b[0;36mcyan\\x1b[0m\\x1b[0;31m\\x1b[0;35m\\x1b[0;33myellow\\x1b[0m\\x1b[0;31m\\x1b[0;35mgenta\\x1b[0m (esc)"
# pad() should interact well with color codes (issue5416)
sh % "hg debugtemplate '--color=always' '{pad(label(red, \"red\"), 5, label(cyan, \"-\"))}\\n'" == "\\x1b[0;31mred\\x1b[0m\\x1b[0;36m-\\x1b[0m\\x1b[0;36m-\\x1b[0m (esc)"
# label should be a no-op if color is disabled:
sh % "hg log '--color=never' -l 1 --template '{label(red, \"text\\n\")}'" == "text"
sh % "hg log --config 'extensions.color=!' -l 1 --template '{label(red, \"text\\n\")}'" == "text"
# Test dict constructor:
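# dict() builds a key/value mapping: keys may be given explicitly (x=rev) or inferred
# from the expression, and the result can be mapped with '%' or rendered with |json.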
sh % "hg log -r 0 -T '{dict(y=node|short, x=rev)}\\n'" == "y=f7769ec2ab97 x=0"
sh % "hg log -r 0 -T '{dict(x=rev, y=node|short) % \"{key}={value}\\n\"}'" == r"""
x=0
y=f7769ec2ab97"""
sh % "hg log -r 0 -T '{dict(x=rev, y=node|short)|json}\\n'" == '{"x": 0, "y": "f7769ec2ab97"}'
sh % "hg log -r 0 -T '{dict()|json}\\n'" == "{}"
sh % "hg log -r 0 -T '{dict(rev, node=node|short)}\\n'" == "rev=0 node=f7769ec2ab97"
sh % "hg log -r 0 -T '{dict(rev, node|short)}\\n'" == "rev=0 node=f7769ec2ab97"
sh % "hg log -r 0 -T '{dict(rev, rev=rev)}\\n'" == r"""
hg: parse error: duplicated dict key 'rev' inferred
[255]"""
sh % "hg log -r 0 -T '{dict(node, node|short)}\\n'" == r"""
hg: parse error: duplicated dict key 'node' inferred
[255]"""
sh % "hg log -r 0 -T '{dict(1 + 2)}'" == r"""
hg: parse error: dict key cannot be inferred
[255]"""
sh % "hg log -r 0 -T '{dict(x=rev, x=node)}'" == r"""
hg: parse error: dict got multiple values for keyword argument 'x'
[255]"""
# Test get function:
sh % "hg log -r 0 --template '{get(extras, \"branch\")}\\n'" == "default"
sh % 'hg log -r 0 --template \'{get(extras, "br{"anch"}")}\\n\'' == "default"
sh % "hg log -r 0 --template '{get(files, \"should_fail\")}\\n'" == r"""
hg: parse error: get() expects a dict as first argument
[255]"""
# Test json filter applied to hybrid object:
sh % "hg log -r0 -T '{files|json}\\n'" == '["a"]'
sh % "hg log -r0 -T '{extras|json}\\n'" == '{"branch": "default"}'
# Test localdate(date, tz) function:
# TZ= does not override the global timezone state on Windows.
if os.name != "nt":
oldtz = os.environ.get("TZ")
os.environ["TZ"] = "JST-09"
import time
# tzset() is required for Python 3.6+ to recognize the timezone change.
# https://bugs.python.org/issue30062
time.tzset()
sh % "hg log -r0 -T '{date|localdate|isodate}\\n'" == "1970-01-01 09:00 +0900"
sh % "hg log -r0 -T '{localdate(date, \"UTC\")|isodate}\\n'" == "1970-01-01 00:00 +0000"
sh % "hg log -r0 -T '{localdate(date, \"blahUTC\")|isodate}\\n'" == r"""
hg: parse error: localdate expects a timezone
[255]"""
sh % "hg log -r0 -T '{localdate(date, \"+0200\")|isodate}\\n'" == "1970-01-01 02:00 +0200"
sh % "hg log -r0 -T '{localdate(date, \"0\")|isodate}\\n'" == "1970-01-01 00:00 +0000"
sh % "hg log -r0 -T '{localdate(date, 0)|isodate}\\n'" == "1970-01-01 00:00 +0000"
if oldtz is not None:
os.environ["TZ"] = oldtz
else:
del os.environ["TZ"]
sh % "hg log -r0 -T '{localdate(date, \"invalid\")|isodate}\\n'" == r"""
hg: parse error: localdate expects a timezone
[255]"""
sh % "hg log -r0 -T '{localdate(date, date)|isodate}\\n'" == r"""
hg: parse error: localdate expects a timezone
[255]"""
# Test shortest(node) function:
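# shortest() yields the shortest unambiguous prefix of a node hash; the optional second
# argument sets a minimum length (the defaults below come out as 4 characters).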
sh % "echo b" > "b"
sh % "hg ci -qAm b"
sh % "hg log --template '{shortest(node)}\\n'" == r"""
21c1
ea4c
f776"""
sh % "hg log --template '{shortest(node, 10)}\\n'" == r"""
21c1b7ca5a
ea4c094848
f7769ec2ab"""
sh % "hg log --template '{node|shortest}\\n' -l1" == "21c1"
sh % 'hg log -r 0 -T \'{shortest(node, "1{"0"}")}\\n\'' == "f7769ec2ab"
sh % "hg log -r 0 -T '{shortest(node, \"not an int\")}\\n'" == r"""
hg: parse error: shortest() expects an integer minlength
[255]"""
sh % "hg log -r 'wdir()' -T '{node|shortest}\\n'" == "ffffffffffffffffffffffffffffffffffffffff"
sh % "cd .."
# Test shortest(node) in a repo that has short hash collisions:
sh % "hg init hashcollision"
sh % "cd hashcollision"
(
sh % "cat"
<< r"""
[experimental]
evolution.createmarkers=True
"""
>> ".hg/hgrc"
)
sh % "echo 0" > "a"
sh % "hg ci -qAm 0"
for i in [17, 129, 248, 242, 480, 580, 617, 1057, 2857, 4025]:
sh.hg("up", "-q", "0")
open("a", "wb").write(b"%s\n" % pycompat.encodeutf8(str(i)))
sh.hg("ci", "-qm", "%s" % i)
sh % "hg up -q null"
sh % "hg log '-r0:' -T '{rev}:{node}\\n'" == r"""
0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a
1:11424df6dc1dd4ea255eae2b58eaca7831973bbc
2:11407b3f1b9c3e76a79c1ec5373924df096f0499
3:11dd92fe0f39dfdaacdaa5f3997edc533875cfc4
4:10776689e627b465361ad5c296a20a487e153ca4
5:a00be79088084cb3aff086ab799f8790e01a976b
6:a0b0acd79b4498d0052993d35a6a748dd51d13e6
7:a0457b3450b8e1b778f1163b31a435802987fe5d
8:c56256a09cd28e5764f32e8e2810d0f01e2e357a
9:c5623987d205cd6d9d8389bfc40fff9dbb670b48
10:c562ddd9c94164376c20b86b0b4991636a3bf84f"""
sh % "hg debugobsolete a00be79088084cb3aff086ab799f8790e01a976b" == ""
sh % "hg debugobsolete c5623987d205cd6d9d8389bfc40fff9dbb670b48" == ""
sh % "hg debugobsolete c562ddd9c94164376c20b86b0b4991636a3bf84f" == ""
# nodes starting with '11' (we don't have the revision number '11' though)
sh % "hg log -r '1:3' -T '{rev}:{shortest(node, 0)}\\n'" == r"""
1:1142
2:1140
3:11d"""
# '5:a00' is hidden, but we still have two nodes starting with 'a0'
sh % "hg log -r '6:7' -T '{rev}:{shortest(node, 0)}\\n'" == r"""
6:a0b
7:a04"""
# node '10' conflicts with the revision number '10' even if it is hidden
# (we could exclude hidden revision numbers, but currently we don't)
sh % "hg log -r 4 -T '{rev}:{shortest(node, 0)}\\n'" == "4:107"
sh % "hg log -r 4 -T '{rev}:{shortest(node, 0)}\\n' --hidden" == "4:107"
# node 'c562' should be unique if the other 'c562' nodes are hidden
# (but we don't try the slow path to filter out hidden nodes for now)
sh % "hg log -r 8 -T '{rev}:{node|shortest}\\n'" == "8:c5625"
sh % "hg log -r '8:10' -T '{rev}:{node|shortest}\\n' --hidden" == r"""
8:c5625
9:c5623
10:c562d"""
sh % "cd .."
# Test pad function
sh % "cd r"
sh % "hg log --template '{pad(rev, 20)} {author|user}\\n'" == r"""
2 test
1 {node|short}
0 test"""
sh % "hg log --template '{pad(rev, 20, \" \", True)} {author|user}\\n'" == r"""
2 test
1 {node|short}
0 test"""
sh % "hg log --template '{pad(rev, 20, \"-\", False)} {author|user}\\n'" == r"""
2------------------- test
1------------------- {node|short}
0------------------- test"""
# Test unicode fillchar
sh % pycompat.decodeutf8(
b"'HGENCODING=utf-8' hg log -r 0 -T '{pad(\"hello\", 10, \"\xe2\x98\x83\")}world\\n'"
) == pycompat.decodeutf8(
b"hello\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83world"
)
# Test template string in pad function
sh % "hg log -r 0 -T '{pad(\"\\{{rev}}\", 10)} {author|user}\\n'" == "{0} test"
sh % "hg log -r 0 -T '{pad(r\"\\{rev}\", 10)} {author|user}\\n'" == "\\{rev} test"
# Test width argument passed to pad function
sh % 'hg log -r 0 -T \'{pad(rev, "1{"0"}")} {author|user}\\n\'' == "0 test"
sh % "hg log -r 0 -T '{pad(rev, \"not an int\")}\\n'" == r"""
hg: parse error: pad() expects an integer width
[255]"""
# Test invalid fillchar passed to pad function
sh % "hg log -r 0 -T '{pad(rev, 10, \"\")}\\n'" == r"""
hg: parse error: pad() expects a single fill character
[255]"""
sh % "hg log -r 0 -T '{pad(rev, 10, \"--\")}\\n'" == r"""
hg: parse error: pad() expects a single fill character
[255]"""
# Test boolean argument passed to pad function
# no crash
sh % 'hg log -r 0 -T \'{pad(rev, 10, "-", "f{"oo"}")}\\n\'' == "---------0"
# string/literal
sh % 'hg log -r 0 -T \'{pad(rev, 10, "-", "false")}\\n\'' == "---------0"
sh % "hg log -r 0 -T '{pad(rev, 10, \"-\", false)}\\n'" == "0---------"
sh % 'hg log -r 0 -T \'{pad(rev, 10, "-", "")}\\n\'' == "0---------"
# unknown keyword is evaluated to ''
sh % "hg log -r 0 -T '{pad(rev, 10, \"-\", unknownkeyword)}\\n'" == "0---------"
# Test separate function
sh % 'hg log -r 0 -T \'{separate("-", "", "a", "b", "", "", "c", "")}\\n\'' == "a-b-c"
sh % 'hg log -r 0 -T \'{separate(" ", "{rev}:{node|short}", author|user, bookmarks)}\\n\'' == "0:f7769ec2ab97 test"
sh % 'hg log -r 0 \'--color=always\' -T \'{separate(" ", "a", label(red, "b"), "c", label(red, ""), "d")}\\n\'' == "a \\x1b[0;31mb\\x1b[0m c d (esc)"
# Test boolean expression/literal passed to if function
sh % "hg log -r 0 -T '{if(rev, \"rev 0 is True\")}\\n'" == "rev 0 is True"
sh % "hg log -r 0 -T '{if(0, \"literal 0 is True as well\")}\\n'" == "literal 0 is True as well"
sh % 'hg log -r 0 -T \'{if("", "", "empty string is False")}\\n\'' == "empty string is False"
sh % 'hg log -r 0 -T \'{if(revset(r"0 - 0"), "", "empty list is False")}\\n\'' == "empty list is False"
sh % "hg log -r 0 -T '{if(true, \"true is True\")}\\n'" == "true is True"
sh % 'hg log -r 0 -T \'{if(false, "", "false is False")}\\n\'' == "false is False"
sh % 'hg log -r 0 -T \'{if("false", "non-empty string is True")}\\n\'' == "non-empty string is True"
# Test ifcontains function
sh % 'hg log --template \'{rev} {ifcontains(rev, "2 two 0", "is in the string", "is not")}\\n\'' == r"""
2 is in the string
1 is not
0 is in the string"""
sh % 'hg log -T \'{rev} {ifcontains(rev, "2 two{" 0"}", "is in the string", "is not")}\\n\'' == r"""
2 is in the string
1 is not
0 is in the string"""
sh % 'hg log --template \'{rev} {ifcontains("a", file_adds, "added a", "did not add a")}\\n\'' == r"""
2 did not add a
1 did not add a
0 added a"""
sh % "hg log --debug -T '{rev}{ifcontains(1, parents, \" is parent of 1\")}\\n'" == r"""
2 is parent of 1
1
0"""
# Test revset function
sh % 'hg log --template \'{rev} {ifcontains(rev, revset("."), "current rev", "not current rev")}\\n\'' == r"""
2 current rev
1 not current rev
0 not current rev"""
sh % 'hg log --template \'{rev} {ifcontains(rev, revset(". + .^"), "match rev", "not match rev")}\\n\'' == r"""
2 match rev
1 match rev
0 not match rev"""
sh % 'hg log -T \'{ifcontains(desc, revset(":"), "", "type not match")}\\n\' -l1' == "type not match"
sh % "hg log --template '{rev} Parents: {revset(\"parents(%s)\", rev)}\\n'" == r"""
2 Parents: 1
1 Parents: 0
0 Parents:"""
(
sh % "cat"
<< r"""
[revsetalias]
myparents(x) = parents(x)
"""
>> ".hg/hgrc"
)
sh % "hg log --template '{rev} Parents: {revset(\"myparents(%s)\", rev)}\\n'" == r"""
2 Parents: 1
1 Parents: 0
0 Parents:"""
sh % 'hg log --template \'Rev: {rev}\\n{revset("::%s", rev) % "Ancestor: {revision}\\n"}\\n\'' == r"""
Rev: 2
Ancestor: 0
Ancestor: 1
Ancestor: 2
Rev: 1
Ancestor: 0
Ancestor: 1
Rev: 0
Ancestor: 0"""
sh % "hg log --template '{revset(\"TIP\"|lower)}\\n' -l1" == "2"
sh % 'hg log -T \'{revset("%s", "t{"ip"}")}\\n\' -l1' == "2"
# a list template is evaluated for each item of revset/parents
sh % 'hg log -T \'{rev} p: {revset("p1(%s)", rev) % "{rev}:{node|short}"}\\n\'' == r"""
2 p: 1:ea4c0948489d
1 p: 0:f7769ec2ab97
0 p:"""
sh % "hg log --debug -T '{rev} p:{parents % \" {rev}:{node|short}\"}\\n'" == r"""
2 p: 1:ea4c0948489d
1 p: 0:f7769ec2ab97
0 p: -1:000000000000"""
# therefore, 'revcache' should be recreated for each rev
sh % 'hg log -T \'{rev} {file_adds}\\np {revset("p1(%s)", rev) % "{file_adds}"}\\n\'' == r"""
2 aa b
p (trailing space)
1 (trailing space)
p a
0 a
p"""
sh % "hg log --debug -T '{rev} {file_adds}\\np {parents % \"{file_adds}\"}\\n'" == r"""
2 aa b
p (trailing space)
1 (trailing space)
p a
0 a
p"""
# a revset item must be evaluated as an integer revision, not an offset from tip
sh % 'hg log -l 1 -T \'{revset("null") % "{rev}:{node|short}"}\\n\'' == "-1:000000000000"
sh % 'hg log -l 1 -T \'{revset("%s", "null") % "{rev}:{node|short}"}\\n\'' == "-1:000000000000"
# join() should pick '{rev}' from revset items:
sh % 'hg log -R ../a -T \'{join(revset("parents(%d)", rev), ", ")}\\n\' -r6' == "4, 5"
# on the other hand, parents are formatted as '{rev}:{node|formatnode}' by
# default. join() should agree with the default formatting:
sh % "hg log -R ../a -T '{join(parents, \", \")}\\n' -r6" == "13207e5a10d9, 07fa1db10648"
sh % "hg log -R ../a -T '{join(parents, \",\\n\")}\\n' -r6 --debug" == r"""
13207e5a10d9fd28ec424934298e176197f2c67f,
07fa1db1064879a32157227401eb44b322ae53ce"""
# Test files function
sh % "hg log -T '{rev}\\n{join(files('\\''*'\\''), '\\''\\n'\\'')}\\n'" == r"""
2
a
aa
b
1
a
0
a"""
sh % "hg log -T '{rev}\\n{join(files('\\''aa'\\''), '\\''\\n'\\'')}\\n'" == r"""
2
aa
1
0"""
# Test relpath function
sh % "hg log -r0 -T '{files % \"{file|relpath}\\n\"}'" == "a"
sh % "cd .."
sh % "hg log -R r -r0 -T '{files % \"{file|relpath}\\n\"}'" == "r/a"
sh % "cd r"
# Test active bookmark templating
sh % "hg book foo"
sh % "hg book bar"
sh % "hg log --template '{rev} {bookmarks % '\\''{bookmark}{ifeq(bookmark, active, \"*\")} '\\''}\\n'" == r"""
2 bar* foo text.{rev} (trailing space)
1 (trailing space)
0"""
sh % "hg log --template '{rev} {activebookmark}\\n'" == r"""
2 bar
1 (trailing space)
0"""
sh % "hg bookmarks --inactive bar"
sh % "hg log --template '{rev} {activebookmark}\\n'" == r"""
2 (trailing space)
1 (trailing space)
0"""
sh % "hg book -r1 baz"
sh % "hg log --template '{rev} {join(bookmarks, '\\'' '\\'')}\\n'" == r"""
2 bar foo text.{rev}
1 baz
0"""
sh % "hg log --template '{rev} {ifcontains('\\''foo'\\'', bookmarks, '\\''t'\\'', '\\''f'\\'')}\\n'" == r"""
2 t
1 f
0 f"""
# Test namespaces dict
sh % 'hg --config "extensions.revnamesext=$TESTDIR/revnamesext.py" log -T \'{rev}\\n{namespaces % " {namespace} color={colorname} builtin={builtin}\\n {join(names, ",")}\\n"}\\n\'' == r"""
2
bookmarks color=bookmark builtin=True
bar,foo,text.{rev}
branches color=branch builtin=True
default
remotebookmarks color=remotebookmark builtin=True
revnames color=revname builtin=False
r2
1
bookmarks color=bookmark builtin=True
baz
branches color=branch builtin=True
default
remotebookmarks color=remotebookmark builtin=True
revnames color=revname builtin=False
r1
0
bookmarks color=bookmark builtin=True
branches color=branch builtin=True
default
remotebookmarks color=remotebookmark builtin=True
revnames color=revname builtin=False
r0"""
# revert side effect of loading the revnames extension
del namespaces.namespacetable["revnames"]
sh % "hg log -r2 -T '{namespaces % \"{namespace}: {names}\\n\"}'" == r"""
bookmarks: bar foo text.{rev}
branches: default
remotebookmarks:"""
sh % 'hg log -r2 -T \'{namespaces % "{namespace}:\\n{names % " {name}\\n"}"}\'' == r"""
bookmarks:
bar
foo
text.{rev}
branches:
default
remotebookmarks:"""
sh % 'hg log -r2 -T \'{get(namespaces, "bookmarks") % "{name}\\n"}\'' == r"""
bar
foo
text.{rev}"""
sh % "hg log -r2 -T '{namespaces.bookmarks % \"{bookmark}\\n\"}'" == r"""
bar
foo
text.{rev}"""
# Test stringify on sub expressions
sh % "cd .."
sh % 'hg log -R a -r 8 --template \'{join(files, if("1", if("1", ", ")))}\\n\'' == "fourth, second, third"
sh % 'hg log -R a -r 8 --template \'{strip(if("1", if("1", "-abc-")), if("1", if("1", "-")))}\\n\'' == "abc"
# Test splitlines
sh % "hg log -Gv -R a --template '{splitlines(desc) % '\\''foo {line}\\n'\\''}'" == r"""
@ foo Modify, add, remove, rename
│
o foo future
│
o foo third
│
o foo second
o foo merge
├─╮
│ o foo new head
│ │
o │ foo new branch
├─╯
o foo no user, no domain
│
o foo no person
│
o foo other 1
│ foo other 2
│ foo
│ foo other 3
o foo line 1
foo line 2"""
sh % "hg log -R a -r0 -T '{desc|splitlines}\\n'" == "line 1 line 2"
sh % "hg log -R a -r0 -T '{join(desc|splitlines, \"|\")}\\n'" == "line 1|line 2"
# Test startswith
sh % "hg log -Gv -R a --template '{startswith(desc)}'" == r"""
hg: parse error: startswith expects two arguments
[255]"""
sh % "hg log -Gv -R a --template '{startswith('\\''line'\\'', desc)}'" == r"""
@
│
o
│
o
│
o
o
├─╮
│ o
│ │
o │
├─╯
o
│
o
│
o
│
o line 1
line 2"""
# Test bad template with better error message
sh % "hg log -Gv -R a --template '{desc|user()}'" == r"""
hg: parse error: expected a symbol, got 'func'
[255]"""
# Test word function (including index out of bounds graceful failure)
sh % "hg log -Gv -R a --template '{word('\\''1'\\'', desc)}'" == r"""
@ add,
│
o
│
o
│
o
o
├─╮
│ o head
│ │
o │ branch
├─╯
o user,
│
o person
│
o 1
│
o 1"""
# Test word third parameter used as splitter
sh % "hg log -Gv -R a --template '{word('\\''0'\\'', desc, '\\''o'\\'')}'" == r"""
@ M
│
o future
│
o third
│
o sec
o merge
├─╮
│ o new head
│ │
o │ new branch
├─╯
o n
│
o n
│
o
│
o line 1
line 2"""
# Test word error messages for not enough and too many arguments
sh % "hg log -Gv -R a --template '{word('\\''0'\\'')}'" == r"""
hg: parse error: word expects two or three arguments, got 1
[255]"""
sh % "hg log -Gv -R a --template '{word('\\''0'\\'', desc, '\\''o'\\'', '\\''h'\\'', '\\''b'\\'', '\\''o'\\'', '\\''y'\\'')}'" == r"""
hg: parse error: word expects two or three arguments, got 7
[255]"""
# Test word for integer literal
sh % "hg log -R a --template '{word(2, desc)}\\n' -r0" == "line"
# Test word for invalid numbers
sh % "hg log -Gv -R a --template '{word('\\''a'\\'', desc)}'" == r"""
hg: parse error: word expects an integer index
[255]"""
# Test word for out of range
sh % "hg log -R a --template '{word(10000, desc)}'"
sh % "hg log -R a --template '{word(-10000, desc)}'"
# Test indent and not adding to empty lines
sh % "hg log -T '-----\\n{indent(desc, '\\''.. '\\'', '\\'' . '\\'')}\\n' -r '0:1' -R a" == r"""
-----
. line 1
.. line 2
-----
. other 1
.. other 2
.. other 3"""
# Test with non-strings like dates
sh % "hg log -T '{indent(date, '\\'' '\\'')}\\n' -r '2:3' -R a" == r"""
1200000.00
1300000.00"""
# Test broken string escapes:
sh % "hg log -T 'bogus\\' -R a" == r"""
hg: parse error: trailing \ in string
[255]"""
sh % pycompat.decodeutf8(
b"hg log -T '\\xy' -R a"
) == r"""
hg: parse error: invalid \x escape* (glob)
[255]"""
# Templater supports aliases of symbol and func() styles:
sh % "hg clone -q a aliases"
sh % "cd aliases"
(
sh % "cat"
<< r"""
[templatealias]
r = rev
rn = "{r}:{node|short}"
status(c, files) = files % "{c} {file}\n"
utcdate(d) = localdate(d, "UTC")
"""
>> ".hg/hgrc"
)
sh % "hg debugtemplate -vr0 '{rn} {utcdate(date)|isodate}\\n'" == r"""
(template
(symbol 'rn')
(string ' ')
(|
(func
(symbol 'utcdate')
(symbol 'date'))
(symbol 'isodate'))
(string '\n'))
* expanded:
(template
(template
(symbol 'rev')
(string ':')
(|
(symbol 'node')
(symbol 'short')))
(string ' ')
(|
(func
(symbol 'localdate')
(list
(symbol 'date')
(string 'UTC')))
(symbol 'isodate'))
(string '\n'))
0:1e4e1b8f71e0 1970-01-12 13:46 +0000"""
sh % "hg debugtemplate -vr0 '{status(\"A\", file_adds)}'" == r"""
(template
(func
(symbol 'status')
(list
(string 'A')
(symbol 'file_adds'))))
* expanded:
(template
(%
(symbol 'file_adds')
(template
(string 'A')
(string ' ')
(symbol 'file')
(string '\n'))))
A a"""
# A unary function alias can be called as a filter:
sh % "hg debugtemplate -vr0 '{date|utcdate|isodate}\\n'" == r"""
(template
(|
(|
(symbol 'date')
(symbol 'utcdate'))
(symbol 'isodate'))
(string '\n'))
* expanded:
(template
(|
(func
(symbol 'localdate')
(list
(symbol 'date')
(string 'UTC')))
(symbol 'isodate'))
(string '\n'))
1970-01-12 13:46 +0000"""
# Aliases should be applied only to command arguments and templates in hgrc.
# Otherwise, our stock styles and web templates could be corrupted:
sh % "hg log -r0 -T '{rn} {utcdate(date)|isodate}\\n'" == "0:1e4e1b8f71e0 1970-01-12 13:46 +0000"
sh % "hg log -r0 --config 'ui.logtemplate=\"{rn} {utcdate(date)|isodate}\\n\"'" == "0:1e4e1b8f71e0 1970-01-12 13:46 +0000"
sh % "cat" << r"""
changeset = 'nothing expanded:{rn}\n'
""" > "tmpl"
sh % "hg log -r0 --style ./tmpl" == "nothing expanded:"
# Aliases in formatter:
sh % "hg bookmarks -T '{pad(bookmark, 7)} {rn}\\n'" == "foo :07fa1db10648"
# Aliases should honor HGPLAIN:
if os.name != "nt":
    # Environment override does not work well across the Python/Rust boundary on
    # Windows. A solution will be changing the config parser to take an environ
    # instead of using the hardcoded system env.
sh % "'HGPLAIN=' hg log -r0 -T 'nothing expanded:{rn}\\n'" == "nothing expanded:"
sh % "'HGPLAINEXCEPT=templatealias' hg log -r0 -T '{rn}\\n'" == "0:1e4e1b8f71e0"
# Unparsable alias:
sh % "hg debugtemplate --config 'templatealias.bad=x(' -v '{bad}'" == r"""
(template
(symbol 'bad'))
abort: bad definition of template alias "bad": at 2: not a prefix: end
[255]"""
sh % "hg log --config 'templatealias.bad=x(' -T '{bad}'" == r"""
abort: bad definition of template alias "bad": at 2: not a prefix: end
[255]"""
sh % "cd .."
# Set up repository for non-ascii encoding tests:
sh % "hg init nonascii"
sh % "cd nonascii"
utf8 = "\u00e9" # == "é"
open("utf-8", "wb").write(pycompat.encodeutf8(utf8))
sh % ("hg bookmark -q '%s'" % utf8)
sh % ("hg ci -qAm 'non-ascii branch: %s' utf-8" % utf8)
# json filter should try round-trip conversion to utf-8:
# Mercurial's json encoding works a little differently in Python 2 and 3, since
# it escapes bytes differently from unicode strings. Restrict the tests here to
# the long-term vision of pure unicode.
import sys
if sys.version_info[0] >= 3:
sh % "hg log -T '{bookmarks|json}\\n' -r0" == '["\\u00e9"]'
sh % "hg log -T '{desc|json}\\n' -r0" == '"non-ascii branch: \\u00e9"'
# json filter takes input as utf-8b:
sh % ("hg log -T '{'\\''%s'\\''|json}\\n' -l1" % utf8) == '"\\u00e9"'
# pad width:
sh % (
"hg debugtemplate '{pad('\\''%s'\\'', 2, '\\''-'\\'')}\\n'" % utf8
) == "\u00e9- (esc)"
sh % "cd .."
# Test that template function in extension is registered as expected
sh % "cd a"
sh % "cat" << r"""
from edenscm.mercurial import registrar
templatefunc = registrar.templatefunc()
@templatefunc('custom()')
def custom(context, mapping, args):
return 'custom'
""" > "$TESTTMP/customfunc.py"
sh % "cat" << r"""
[extensions]
customfunc = $TESTTMP/customfunc.py
""" > ".hg/hgrc"
sh % "hg log -r . -T '{custom()}\\n' --config 'customfunc.enabled=true'" == "custom"
sh % "cd .."
# Test 'graphwidth' in 'hg log' on various topologies. The key here is that the
# printed graphwidths 3, 5, 7, etc. should all line up in their respective
# columns. We don't care about other aspects of the graph rendering here.
sh % "hg init graphwidth"
sh % "cd graphwidth"
sh % "'wrappabletext=a a a a a a a a a a a a'"
sh % "printf 'first\\n'" > "file"
sh % "hg add file"
sh % 'hg commit -m "$wrappabletext"'
sh % "printf 'first\\nsecond\\n'" > "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg checkout 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "printf 'third\\nfirst\\n'" > "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg merge" == r"""
merging file
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 3
│
│ @ 5
├─╯
o 3"""
sh % 'hg commit -m "$wrappabletext"'
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 5
├─╮
│ o 5
│ │
o │ 5
├─╯
o 3"""
sh % "hg checkout 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "printf 'third\\nfirst\\nsecond\\n'" > "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 3
│
│ o 7
│ ├─╮
│ │ o 7
├───╯
│ o 5
├─╯
o 3"""
sh % "hg log --graph -T '{graphwidth}' -r 3" == r"""
o 5
├─╮
│ │
~ ~"""
sh % "hg log --graph -T '{graphwidth}' -r 1" == r"""
o 3
│
~"""
sh % "hg merge" == r"""
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % 'hg commit -m "$wrappabletext"'
sh % "printf 'seventh\\n'" >> "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 3
│
o 5
├─╮
│ o 5
│ │
o │ 7
├───╮
│ │ o 7
│ ├─╯
o │ 5
├─╯
o 3"""
# The point of graphwidth is to allow wrapping that accounts for the space taken
# by the graph.
sh % "'COLUMNS=10' hg log --graph -T '{fill(desc, termwidth - graphwidth)}'" == r"""
@ a a a a
│ a a a a
│ a a a a
o a a a
├─╮ a a a
│ │ a a a
│ │ a a a
│ o a a a
│ │ a a a
│ │ a a a
│ │ a a a
o │ a a
├───╮ a a
│ │ │ a a
│ │ │ a a
│ │ │ a a
│ │ │ a a
│ │ o a a
│ ├─╯ a a
│ │ a a
│ │ a a
│ │ a a
│ │ a a
o │ a a a
├─╯ a a a
│ a a a
│ a a a
o a a a a
a a a a
a a a a"""
# Something tricky happens when there are elided nodes; the next drawn row of
# edges can be more than one column wider, but the graph width only increases by
# one column. The remaining columns are added in between the nodes.
sh % "hg log --graph -T '{graphwidth}' -r '0|2|4|5'" == r"""
o 7
├─┬─╮
o ╷ ╷ 7
├─╯ ╷
│ o 7
├───╯
o 3"""
sh % "cd .."
# Confirm that truncation does the right thing
sh % "hg debugtemplate '{truncatelonglines(\"abcdefghijklmnopqrst\\n\", 10)}'" == "abcdefghij"
sh % pycompat.decodeutf8(
b'hg debugtemplate \'{truncatelonglines("abcdefghijklmnopqrst\\n", 10, "\xe2\x80\xa6")}\''
) == pycompat.decodeutf8(b"abcdefghi\xe2\x80\xa6 (esc)")
sh % "hg debugtemplate '{truncate(\"a\\nb\\nc\\n\", 2)}'" == r"""
a
b"""
sh % 'hg debugtemplate \'{truncate("a\\nb\\nc\\n", 2, "truncated\\n")}\'' == r"""
a
truncated"""
# Test case expressions
sh % "hg debugtemplate \"{case('a', 'a', 'A', 'b', 'B', 'c', 'C')}\"" == "A"
sh % "hg debugtemplate \"{case('b', 'a', 'A', 'b', 'B', 'c', 'C', 'D')}\"" == "B"
sh % "hg debugtemplate \"{case('x', 'a', 'A', 'b', 'B', 'c', 'C')}\"" == ""
sh % "hg debugtemplate \"{case('x', 'a', 'A', 'b', 'B', 'c', 'C', 'D')}\"" == "D"
|
gpl-2.0
| -8,763,705,720,192,593,000
| 28.222463
| 604
| 0.572474
| false
| 2.728079
| true
| false
| false
|
evolaemp/northeuralex_website
|
northeuralex/datatables.py
|
1
|
6527
|
from clld.db.meta import DBSession
from clld.web.datatables.base import Col, IntegerIdCol, LinkToMapCol, LinkCol
from clld.web.util.helpers import external_link, link, map_marker_img
from clld.web.util.htmllib import HTML
from clld.web import datatables
from northeuralex.models import Concept, Doculect, Word
"""
Columns
"""
class IsoCodeCol(Col):
"""
Custom column to set a proper title for the iso_code column of the
languages table.
"""
__kw__ = {'sTitle': 'ISO 639-3'}
class GlottoCodeCol(Col):
"""
Custom column to present the glotto_code column of the languages table as a
link to the respective languoid in Glottolog.
"""
__kw__ = {'sTitle': 'Glottocode'}
def format(self, doculect):
href = 'http://glottolog.org/resource/languoid/id/{}'.format(doculect.glotto_code)
return external_link(href, doculect.glotto_code)
class FamilyCol(Col):
"""
Custom column to replace the search with a drop-down and to add icons for
the family column of the languages table.
Unlike in, e.g., NextStepCol, the choices have to be set in the constructor
because otherwise the unit tests do not work.
The icons are handled in the format method, the code being stolen from the
datatable module of the clld-glottologfamily-plugin repo.
"""
def __init__(self, *args, **kwargs):
kwargs['choices'] = sorted([
x[0] for x in DBSession.query(Doculect.family).distinct()])
super().__init__(*args, **kwargs)
def format(self, doculect):
return HTML.div(map_marker_img(self.dt.req, doculect), ' ', doculect.family)
class SubfamilyCol(Col):
"""
Custom column to replace the search with a drop-down for the subfamily
column of the languages table.
Unlike in, e.g., NextStepCol, the choices have to be set in the constructor
because otherwise the unit tests do not work.
"""
def __init__(self, *args, **kwargs):
kwargs['choices'] = sorted([
x[0] for x in DBSession.query(Doculect.subfamily).distinct()])
super().__init__(*args, **kwargs)
class ConcepticonCol(Col):
"""
Custom column to present the concepticon_name column of the concepts table
as a link to the respective concept in the Concepticon.
"""
__kw__ = {'sTitle': 'Concepticon'}
def format(self, concept):
if concept.concepticon_id:
href = 'http://concepticon.clld.org/parameters/{}'.format(concept.concepticon_id)
return external_link(href, concept.concepticon_name)
else:
return ''
class ConceptLinkCol(LinkCol):
"""
Custom column to present the concept column of the words table as a link
with a title attribute containing the concept's English name.
"""
def format(self, item):
concept = self.get_obj(item)
if concept:
return link(self.dt.req, concept, **{'title': concept.english_name})
else:
return ''
class DoculectLinkCol(LinkCol):
"""
Custom column to present the doculect column of the words table as a link
with a title attribute containing the doculect's family and subfamily.
"""
def format(self, item):
doculect = self.get_obj(item)
if doculect:
title = '{} ({}, {})'.format(doculect.name,
doculect.family, doculect.subfamily)
return link(self.dt.req, doculect, **{'title': title})
else:
return ''
class NextStepCol(Col):
"""
Custom column to replace the search with a drop-down for the next_step
column of the words table. Also provides help info in the column's header.
"""
__kw__ = {
'sTitle': (
'<abbr title="'
'process → review → validate'
'">Next action</abbr>'),
'choices': [('validate', 'validate'),
('review', 'review'),
('process', 'process')] }
"""
Tables
"""
class LanguagesDataTable(datatables.Languages):
def col_defs(self):
return [
LinkToMapCol(self, 'm'),
LinkCol(self, 'name'),
GlottoCodeCol(self, 'glotto_code', model_col=Doculect.glotto_code),
IsoCodeCol(self, 'iso_code', model_col=Doculect.iso_code),
FamilyCol(self, 'family', model_col=Doculect.family),
SubfamilyCol(self, 'subfamily', model_col=Doculect.subfamily),
Col(self, 'latitude'),
Col(self, 'longitude') ]
class ConceptsDataTable(datatables.Parameters):
def col_defs(self):
return [
IntegerIdCol(self, 'id', model_col=Concept.id),
LinkCol(self, 'name'),
Col(self, 'english', model_col=Concept.english_name),
Col(self, 'german', model_col=Concept.german_name),
Col(self, 'russian', model_col=Concept.russian_name),
ConcepticonCol(self, 'concepticon', model_col=Concept.concepticon_name) ]
class WordsDataTable(datatables.Values):
def col_defs(self):
res = []
if self.language:
res.extend([
IntegerIdCol(self, 'id', model_col=Concept.id,
get_object=lambda x: x.valueset.parameter),
ConceptLinkCol(self, 'concept', model_col=Concept.name,
get_object=lambda x: x.valueset.parameter) ])
elif self.parameter:
res.extend([
DoculectLinkCol(self, 'language', model_col=Doculect.name,
get_object=lambda x: x.valueset.language) ])
res.extend([
Col(self, 'form', model_col=Word.name, sTitle='Orthographic form'),
Col(self, 'raw_ipa', model_col=Word.raw_ipa, sTitle='Automatically generated IPA'),
# Col(self, 'norm_ipa', model_col=Word.norm_ipa, sTitle='Normalised IPA'),
NextStepCol(self, 'next_step', model_col=Word.next_step) ])
return res
class SourcesDataTable(datatables.Sources):
def col_defs(self):
return super().col_defs()[:-1]
"""
Hooks
"""
def includeme(config):
"""
Magical (not in the good sense) hook that replaces the default data tables
with the custom ones defined in this module.
"""
config.register_datatable('languages', LanguagesDataTable)
config.register_datatable('parameters', ConceptsDataTable)
config.register_datatable('values', WordsDataTable)
config.register_datatable('sources', SourcesDataTable)
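# A minimal sketch of how the includeme() hook above is typically wired into a
# Pyramid/clld application factory (the dotted path 'northeuralex.datatables'
# is an assumption based on this file's location):
#
#     from pyramid.config import Configurator
#
#     def main(global_config, **settings):
#         config = Configurator(settings=settings)
#         config.include('northeuralex.datatables')  # runs includeme(config)
#         return config.make_wsgi_app()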
|
mit
| -2,206,149,159,916,614,000
| 28.251121
| 95
| 0.619807
| false
| 3.660494
| false
| false
| false
|
opena11y/fae2
|
fae2/populate/pop_wcag20.py
|
1
|
10340
|
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: populate/pop_wcag20.py
Author: Jon Gunderson
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import django
from django.core.exceptions import ObjectDoesNotExist
fp = os.path.realpath(__file__)
path, filename = os.path.split(fp)
fae2_path = path.split('/populate')[0]
sys.path.append(fae2_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae2.settings')
from django.conf import settings
django.setup()
from wcag20.models import Principle, Guideline, SuccessCriterion
"""This file is for populating the database with WCAG 2.0 References"""
# Principle.objects.all().delete()
# Guideline.objects.all().delete()
# SuccessCriterion.objects.all().delete()
def create_wcag20(wcag20):
print("wcag 2.0")
for principle in wcag20:
principle_url = 'http://www.w3.org/TR/WCAG20/#' + principle[2]
try:
wcag20_principle = Principle.objects.get(num=principle[0])
print(" " + wcag20_principle.title + " (found)")
wcag20_principle.title = principle[1]
wcag20_principle.url = principle_url
print(principle[1] + " (updated) " + principle[0])
except:
wcag20_principle = Principle(num=principle[0], title=principle[1], url=principle_url)
print(principle[1] + " (CREATED)")
wcag20_principle.save()
for guideline in principle[3]:
guideline_url = 'http://www.w3.org/TR/WCAG20/#' + guideline[2]
guideline_slug = 'p' + principle[0] + 'g' + str(guideline[0])
try:
wcag20_guideline = Guideline.objects.get(principle=wcag20_principle, num=guideline[0])
print(" " + wcag20_guideline.title + " (found)")
wcag20_guideline.title = guideline[1]
wcag20_guideline.url = guideline_url
wcag20_guideline.slug = guideline_slug
print(" " + guideline[1] + " (updated)")
except:
wcag20_guideline = Guideline(principle=wcag20_principle, num=guideline[0], title=guideline[1], url=guideline_url, slug=guideline_slug)
print(" " + guideline[1] + " (CREATED)")
wcag20_guideline.save()
for requirement in guideline[3]:
requirement_url = 'http://www.w3.org/TR/WCAG20/#' + requirement[2]
meet_url = 'http://www.w3.org/WAI/WCAG20/quickref/#qr-' + requirement[2] + '.html'
understand_url = 'http://www.w3.org/TR/WCAG20/' + requirement[2] + '.html'
requirement_slug = guideline_slug + 'sc' + str(requirement[0])
try:
wcag20_requirement = SuccessCriterion.objects.get(guideline=wcag20_guideline, num=requirement[0])
print(" " + wcag20_requirement.title + " (found)")
wcag20_requirement.title = requirement[1]
wcag20_requirement.url = requirement_url
wcag20_requirement.url_meet = meet_url
wcag20_requirement.url_understand = understand_url
wcag20_requirement.level = requirement[3]
wcag20_requirement.slug = requirement_slug
print(" " + requirement[1] + " (updated)")
except:
wcag20_requirement = SuccessCriterion(guideline=wcag20_guideline, num=requirement[0], title=requirement[1], url=requirement_url, url_meet=meet_url, url_understand=understand_url, level=requirement[3], slug=requirement_slug)
print(" " + requirement[1] + " (CREATED)")
wcag20_requirement.save()
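# Shape of the nested tuples consumed above, level by level (as read off the
# indexing in create_wcag20):
#   (principle_num, principle_title, principle_anchor, (guidelines, ...))
#   (guideline_num, guideline_title, guideline_anchor, (success_criteria, ...))
#   (criterion_num, criterion_title, criterion_anchor, wcag_level)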
wcag20 = (
('1', 'Perceivable - Information and user interface components must be presentable to users in ways they can perceive.', 'perceivable',
(
('1', 'Text Alternatives', 'text-equiv',
(
('1', 'Non-text Content', 'text-equiv', '1',),
),
),
('2', 'Time-based Media', 'media-equiv',
(
('1', 'Audio-only and Video-only (Prerecorded)', 'media-equiv-av-only-alt','1',),
('2', 'Captions (Prerecorded)', 'media-equiv-captions','1',),
('3', 'Audio Description or Media Alternative (Prerecorded)', 'media-equiv-audio-desc','1',),
('4', 'Captions (Live)', 'media-equiv-real-time-captions','2',),
('5', 'Audio Description (Prerecorded)', 'media-equiv-audio-desc-only','2',),
('6', 'Sign Language (Prerecorded)', 'media-equiv-sign','3',),
('7', 'Extended Audio Description (Prerecorded)', 'media-equiv-extended-ad','3',),
('8', 'Media Alternative (Prerecorded)', 'media-equiv-text-doc','3',),
('9', 'Audio-only (Live)', 'media-equiv-live-audio-only','3',),
),
),
('3', 'Adaptable', 'content-structure-separation',
(
('1', 'Info and Relationships', 'content-structure-separation-programmatic','1',),
('2', 'Meaningful Sequence', 'content-structure-separation-sequenc','1',),
('3', 'Sensory Characteristics', 'content-structure-separation-understanding','1',),
),
),
('4', 'Distinguishable', 'visual-audio-contrast',
(
('1', 'Use of Color', 'visual-audio-contrast-without-color','1',),
('2', 'Audio Control', 'visual-audio-contrast-dis-audio','1',),
('3', 'Contrast (Minimum)', 'visual-audio-contrast-contrast','2',),
('4', 'Resize text', 'visual-audio-contrast-scale','2',),
('5', 'Images of Text', 'visual-audio-contrast-text-presentation','2',),
('6', 'Contrast (Enhanced)', 'visual-audio-contrast7','3',),
('7', 'Low or No Background Audio', 'visual-audio-contrast-noaudio','3',),
('8', 'Visual Presentation', 'visual-audio-contrast-visual-presentation','3',),
('9', 'Images of Text (No Exception)', 'visual-audio-contrast-text-images','3',),
),
),
),
),
('2', 'Operable - User interface components and navigation must be operable.', 'perceivable',
(
('1', 'Keyboard Accessible', 'keyboard-operation',
(
('1', 'Keyboard', 'keyboard-operation-keyboard-operable', '1',),
('2', 'No Keyboard Trap', 'keyboard-operation-trapping', '1',),
('3', 'Keyboard (No Exception)', 'keyboard-operation-all-funcs', '3',),
),
),
('2', 'Enough Time', '',
(
('1', 'Timing Adjustable', 'time-limits-required-behaviors', '1',),
('2', 'Pause, Stop, Hide', 'time-limits-pause', '1',),
('3', 'No Timing', 'time-limits-no-exceptions', '3',),
('4', 'Interruptions', 'time-limits-postponed', '3',),
('5', 'Re-authenticating', 'time-limits-server-timeout', '3',),
),
),
('3', 'Seizures', 'seizure',
(
('1', 'Three Flashes or Below Threshold', 'seizure-does-not-violate', '1',),
('2', 'Three Flashes', 'seizure-three-times', '3',),
),
),
('4', 'Navigable', 'navigation-mechanisms',
(
('1', 'Bypass Blocks', 'navigation-mechanisms-skip', '1',),
('2', 'Page Titled', 'navigation-mechanisms-title', '1',),
('3', 'Focus Order', 'navigation-mechanisms-focus-order', '1',),
('4', 'Link Purpose (In Context)', 'navigation-mechanisms-refs', '1',),
('5', 'Multiple Ways', 'navigation-mechanisms-mult-loc', '2',),
('6', 'Headings and Labels', 'navigation-mechanisms-descriptive', '2',),
('7', 'Focus Visible', 'navigation-mechanisms-focus-visible', '2',),
('8', 'Location', 'navigation-mechanisms-location', '3',),
('9', 'Link Purpose (Link Only)', 'navigation-mechanisms-link', '3',),
('10', 'Section Headings', 'navigation-mechanisms-headings', '3',),
),
),
),
),
('3', 'Understandable - Information and the operation of user interface must be understandable.', 'understandable',
(
('1', 'Readable', 'meaning',
(
('1', 'Language of Page', 'meaning-doc-lang-id', '1',),
('2', 'Language of Parts', 'meaning-other-lang-id', '2',),
('3', 'Unusual Words', 'meaning-idioms', '3',),
('4', 'Abbreviations ', 'meaning-located', '3',),
('5', 'Reading Level', 'meaning-supplements', '3',),
('6', 'Pronunciation', 'meaning-pronunciation', '3',),
),
),
('2', 'Predictable', 'consistent-behavior',
(
('1', 'On Focus', 'consistent-behavior-receive-focus', '1',),
('2', 'On Input', 'consistent-behavior-unpredictable-change', '1',),
('3', 'Consistent Navigation', 'consistent-behavior-consistent-locations', '2',),
('4', 'Consistent Identification', 'consistent-behavior-consistent-functionality', '2',),
('5', 'Change on Request', 'consistent-behavior-no-extreme-changes-context', '3',),
),
),
('3', 'Input Assistance', 'minimize-error',
(
('1', 'Error Identification', 'minimize-error-identified', '1',),
('2', 'Labels or Instructions', 'minimize-error-cues', '1',),
('3', 'Error Suggestion', 'minimize-error-suggestions', '2',),
('4', 'Error Prevention (Legal, Financial, Data)', 'minimize-error-reversible', '2',),
('5', 'Help', 'minimize-error-context-help', '3',),
('6', 'Error Prevention (All)', 'minimize-error-reversible-all', '3',),
),
),
),
),
('4', 'Robust - Content must be robust enough that it can be interpreted reliably by a wide variety of user agents, including assistive technologies.', 'robust',
(
('1', ' Compatible', 'ensure-compat',
(
('1', 'Parsing', 'ensure-compat-parses', '1',),
('2', 'Name, Role, Value', 'ensure-compat-rsv', '1',),
),
),
)
)
)
create_wcag20( wcag20 )
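# A minimal usage sketch: because the script sets DJANGO_SETTINGS_MODULE and
# calls django.setup() itself, it is normally executed directly rather than
# imported (the working directory is an assumption):
#
#     python populate/pop_wcag20.py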
|
apache-2.0
| -2,793,003,747,385,330,700
| 43.377682
| 241
| 0.583462
| false
| 3.41029
| false
| false
| false
|
cberridge/trappetroll
|
src/audio_player.py
|
1
|
3668
|
'''
Play mp3s and also call a function to move
the mouth when the mp3 starts to play
'''
import time
import os
import random
import glob
import pygame
import moves_play
class AudioPlayer(object):
    '''Play mp3 files via pygame, driving a mouth-movement callback for files
    whose names end in "_m.mp3".'''
def __init__(self, mouth_callback):
'''-'''
pygame.mixer.init()
pygame.mixer.music.set_volume(0)
# path relative to the directory in which this script runs
self.audio_directory = \
os.path.normpath(os.path.dirname(os.path.abspath(__file__))
+ '/../audio')
self.name_file = 'name_m.mp3'
self.mouth_callback = mouth_callback
self.is_killed = False
print self.audio_directory
def _play_single_mp3(self, file_name_with_path, volume=100.0):
        '''Play a single mp3 file, moving the mouth if its name ends in "_m.mp3".
        Blocks until playback finishes; returns True unless playback was killed.'''
self.is_killed = False
move_mouth = file_name_with_path.endswith('_m.mp3')
print "Playing... " + file_name_with_path
if pygame.mixer.music.get_busy():
pygame.mixer.stop()
pygame.mixer.music.set_volume(volume/100.0)
pygame.mixer.music.load(file_name_with_path)
pygame.mixer.music.play()
if move_mouth:
moves_play.play_moves(file_name_with_path,
self.mouth_callback,
self.is_playing,
async=False)
while self.is_playing():
time.sleep(0.01)
pygame.mixer.music.set_volume(0) # Removes hum
return not self.is_killed
def play_mp3(self, file_name, volume=100.0, has_path=False):
        '''Play an mp3 file, or recurse into a directory of mp3s.

        A directory whose name ends in ".rand" plays one random entry; any
        other directory plays all of its mp3s and subdirectories in
        alphabetical order. A path ending in the name file (name_m.mp3) is
        redirected to the copy in the top-level audio directory. Returns True
        on success, False if playback was killed or nothing matched.
        '''
return_value = False
if has_path:
file_name_with_path = file_name
else:
file_name_with_path = self.audio_directory +'/'+ file_name
if os.path.isdir(file_name_with_path):
dir_name = file_name_with_path
# Get both directories and mp3 files in the current directory
dir_list = glob.glob(dir_name + '/*/')
# Remove the trailing slash!
dir_list = [directory[:-1] for directory in dir_list]
file_list = glob.glob(dir_name + '/*.mp3') + dir_list
file_list.sort()
if file_name_with_path.endswith('.rand'):
# play a random file in directory
random_index = int(random.random() * len(file_list))
return_value = self.play_mp3(file_list[random_index], volume,
True)
            else:
                # play all mp3s and directories in alphabetical order;
                # report success unless a nested play fails or is killed
                return_value = True
                for current_file in file_list:
                    if not self.play_mp3(current_file, volume, True):
                        return_value = False
                        break
elif file_name_with_path.endswith(self.name_file):
# if the file ends in name_m.mp3, don't play it, play the file
# in the top level audio directory
return_value = self._play_single_mp3(self.audio_directory + '/' +
self.name_file,
volume)
elif file_name_with_path.endswith('.mp3'):
return_value = self._play_single_mp3(file_name_with_path, volume)
else:
print 'no match: ' + file_name_with_path
return return_value
def kill_sound(self):
'''-'''
self.is_killed = True
pygame.mixer.music.stop()
pygame.mixer.music.set_volume(0)
@staticmethod
def is_playing():
'''-'''
return pygame.mixer.music.get_busy()
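# A minimal usage sketch (assumptions: pygame can open an audio device, the
# ../audio directory exists, and moves_play passes a single mouth position to
# the callback):
#
#     def mouth_callback(position):
#         print position            # would normally drive the mouth servo
#
#     player = AudioPlayer(mouth_callback)
#     player.play_mp3('name_m.mp3')   # plays the name recording, moving the mouth
#     player.play_mp3('jokes.rand')   # plays one random mp3 from audio/jokes.rand/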
|
gpl-2.0
| 2,995,576,482,805,124,600
| 32.345455
| 77
| 0.526445
| false
| 3.927195
| false
| false
| false
|
jardiacaj/finem_imperii
|
world/management/commands/initialize_world.py
|
1
|
1240
|
import logging
from django.core.management.base import BaseCommand, CommandError
from world.initialization import initialize_world, AlreadyInitializedException
from world.models.geography import World
class Command(BaseCommand):
help = 'Initializes the specified world'
def add_arguments(self, parser):
parser.add_argument('world_id', nargs='+', type=int)
def handle(self, *args, **options):
logging.getLogger().setLevel(logging.INFO)
for world_id in options['world_id']:
try:
world = World.objects.get(pk=world_id)
except World.DoesNotExist:
raise CommandError(
'World with id {} does not exist'.format(world_id))
try:
initialize_world(world)
except AlreadyInitializedException:
raise CommandError('{} ({}) is already initialized'.format(
world,
world_id
))
self.stdout.write(
self.style.SUCCESS(
'Successfully initialized {} ({})'.format(
world,
world_id
)
)
)
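# A minimal usage sketch: as a Django management command, the class above is
# invoked through manage.py using this module's name (world ids are
# illustrative):
#
#     python manage.py initialize_world 1
#     python manage.py initialize_world 2 3 5   # several worlds in one call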
|
agpl-3.0
| -7,105,990,472,116,270,000
| 30
| 78
| 0.53871
| false
| 5.232068
| false
| false
| false
|
ganga-devs/ganga
|
ganga/GangaCore/test/GPI/TestStartUp.py
|
1
|
2097
|
import os
import inspect
import sys
import shutil
import glob
from tempfile import mkdtemp
# First clear away any configuration and temp files, since they would not be present on a true first launch
homeDir = os.path.expanduser("~")
if os.path.exists(os.path.join(homeDir, '.gangarc')):
os.unlink(os.path.join(homeDir, '.gangarc'))
for logFile in glob.glob(os.path.join(homeDir, '.ganga.log*')):
os.unlink(logFile)
shutil.rmtree(os.path.join(homeDir, '.ipython-ganga'), ignore_errors=True)
shutil.rmtree(os.path.join(homeDir, '.gangarc_backups'), ignore_errors=True)
def standardSetup():
"""Function to perform standard setup for GangaCore.
"""
gangaDir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), '../../ganga'))
sys.path.insert(0, gangaDir)
from GangaCore.PACKAGE import standardSetup
standardSetup()
standardSetup()
del standardSetup
this_dir = mkdtemp()
def testStartUp():
""" Lets test the startup of Ganga mimicking first launch """
# Process options given at command line and in configuration file(s)
# Perform environment setup and bootstrap
from GangaCore.Runtime import setupGanga
argv = ['ganga', '--no-mon', '-o[Configuration]gangadir=%s' % this_dir, '-o[Configuration]RUNTIME_PATH=GangaTest']
setupGanga(argv=argv, interactive=False)
for this_file in ['.gangarc', '.ganga.log']:
assert os.path.isfile(os.path.join(homeDir, this_file))
# No way known to mimic IPython starting up in a simple way
#assert os.path.isdir(os.path.join(homeDir, '.ipython-ganga'))
for this_folder in ['repository',]:
assert os.path.isdir(os.path.join(this_dir, this_folder))
from GangaCore.GPI import Job
j=Job()
j.submit()
for this_folder in ['shared', 'workspace']:
assert os.path.isdir(os.path.join(this_dir, this_folder))
def testShutdown():
""" Lets just call the shutdown here for safety """
from GangaCore.testlib.GangaUnitTest import stop_ganga
stop_ganga()
shutil.rmtree(this_dir, ignore_errors=True)
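# A sketch of how these tests are normally collected and run (assuming a
# pytest-style runner; testStartUp must run before testShutdown, which is the
# order in which they are defined here):
#
#     python -m pytest ganga/GangaCore/test/GPI/TestStartUp.py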
|
gpl-2.0
| 2,743,772,439,762,664,000
| 29.838235
| 134
| 0.702909
| false
| 3.291994
| true
| false
| false
|
madscatt/zazzie_1.5
|
trunk/sassie/analyze/apbs/apbs.py
|
1
|
12389
|
'''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import locale
import string
import time
import subprocess
from write_apbs_input import *
import sassie.sasmol.sasmol as sasmol
import sassie.sasconfig as sasconfig
# APBS
#
# 12/05/2004 -- initial coding : jc
# 01/02/2011 -- added sasmol support : jc
# 08/26/2011 -- adapted for mdx : jc
# 06/16/2012 -- adapted for namd v. 2.9 : jc
# 09/10/2012 -- adapted for apbs : jc
#
# LC 1 2 3 4 5 6 7
# LC4567890123456789012345678901234567890123456789012345678901234567890123456789
# * **
'''
APBS is the module that contains the functions
that are used to run a series of electrostatic calculations
on a set of structures in a supplied pdb/dcd file.
This module is called from APBS in the main
GUI through the graphical_apbs.py script.
REFERENCE:
Baker NA, Sept D, Joseph S, Holst MJ, McCammon JA. Electrostatics of
nanosystems: application to microtubules and the ribosome.
Proc. Natl. Acad. Sci. USA 98, 10037-10041 2001.
M. Holst and F. Saied, Multigrid solution of the Poisson-Boltzmann equation.
J. Comput. Chem. 14, 105-113, 1993.
M. Holst and F. Saied, Numerical solution of the nonlinear Poisson-Boltzmann
equation: Developing more robust and efficient methods.
J. Comput. Chem. 16, 337-364, 1995.
M. Holst, Adaptive numerical treatment of elliptic systems on manifolds.
Advances in Computational Mathematics 15, 139-191, 2001.
R. Bank and M. Holst, A New Paradigm for Parallel Adaptive Meshing Algorithms.
SIAM Review 45, 291-323, 2003.
'''
def unpack_variables(variables):
runname = variables['runname'][0]
infile = variables['infile'][0]
pdbfile = variables['pdbfile'][0]
outfile = variables['outfile'][0]
temperature = variables['temperature'][0]
ph = variables['ph'][0]
ion_charge = variables['ion_charge'][0]
ion_conc = variables['ion_conc'][0]
ion_radius = variables['ion_radius'][0]
manual_flag = variables['manual_flag'][0]
manual_file = variables['manual_file'][0]
#energyfile = variables['energyfile'][0]
#keepout = variables['keepout'][0]
return runname, infile, pdbfile, outfile, temperature, ph, ion_charge, ion_conc, ion_radius, manual_flag, manual_file
def print_failure(message, txtOutput):
txtOutput.put("\n\n>>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n\n")
txtOutput.put(message)
return
def rename_his(m1):
natoms = m1.natoms()
resname = m1.resname()
new_resname = []
for i in xrange(natoms):
this_resname = resname[i]
if(this_resname == 'HSE' or this_resname == 'HSD' or this_resname == 'HSP'):
new_resname.append('HIS')
else:
new_resname.append(this_resname)
m1.setResname(new_resname)
return
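# Worked example of the renaming performed above (residue names are
# illustrative): a resname list such as ['HSD', 'ALA', 'HSE', 'HSP'] is
# rewritten in place to ['HIS', 'ALA', 'HIS', 'HIS'], presumably so that
# pdb2pqr, which later reads the temporary PDB, sees standard histidines.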
def apbs_driver(variables, txtOutput):
'''
APBS_DRIVER is the function to read in variables from GUI input and
used to run a series of apbs calculations
on a set of structures in a supplied pdb/dcd file.
INPUT: variable descriptions:
runname: run_name
infile: input pdb or dcd filename
pdbfile: input pdb file (reference)
temperature: temperature of simulation
OUTPUT:
txtOutput: TK handler for output to GUI textbox
files stored in ~/run_name/apbs directory:
outfile: output filename
'''
runname, infile, pdbfile, outfile, temperature, ph, ion_charge, ion_conc, ion_radius, manual_flag, manual_file = unpack_variables(
variables)
keepout = 1
dcdfreq = 1
path = runname + '/apbs/'
print 'path = ', path
print 'infile = ', infile
vers = 'version 0.1 : 09/10/12 : jc'
direxist = os.path.exists(path)
if(direxist == 0):
try:
result = os.system('mkdir -p ' + path)
except:
message = 'can not create project directory: ' + path
message += '\nstopping here\n'
print_failure(message, txtOutput)
if(result != 0):
message = 'can not create project directory: ' + path
message += '\nstopping here\n'
print_failure(message, txtOutput)
m1 = sasmol.SasMol(0)
m2 = sasmol.SasMol(0)
m1.read_pdb(pdbfile)
m2.read_pdb(pdbfile, fastread=True)
rename_his(m1)
rename_his(m2)
try:
if(infile[-3:] == 'dcd'):
infiletype = 'dcd'
elif(infile[-3:] == 'pdb'):
infiletype = 'pdb'
except:
message = 'input filename is a PDB or DCD file but it must end with ".pdb" or ".dcd" '
message += ' : stopping here'
print_failure(message, txtOutput)
print 'infiletype = ', infiletype
if(infiletype == 'dcd'):
min_max = m2.calc_minmax_all_steps(infile)
dcdfile = m1.open_dcd_read(infile)
nf = dcdfile[2]
else:
m1.read_pdb(infile)
nf = m1.coor()[:, 0, 0].shape[0]
min_max = m2.calc_minmax_all_steps(infile, pdb='pdb')
print 'number of frames = ', nf
print 'min_max = ', min_max
maximum_dimensions = [min_max[1][0] - min_max[0][0],
min_max[1][1] - min_max[0][1], min_max[1][2] - min_max[0][2]]
print 'min_max = ', min_max
print 'maximum_dimensions = ', maximum_dimensions
# ttxt=time.ctime()
ttxt = time.asctime(time.gmtime(time.time()))
st = ''.join(['=' for x in xrange(60)])
txtOutput.put("\n%s \n" % (st))
txtOutput.put("DATA FROM RUN: %s \n\n" % (ttxt))
final_energy = []
coorlist = []
for i in range(nf):
print 'apbs calculation for frame ', i + 1, ' of ', nf
print 'apbs calculation for frame ', i + 1, ' of ', nf
print 'apbs calculation for frame ', i + 1, ' of ', nf
print 'writing temporary PDB file'
if(infiletype == 'dcd'):
m1.read_dcd_step(dcdfile, i)
m1.write_pdb(path + 'junk.pdb', 0, 'w')
else:
m1.write_pdb(path + 'junk.pdb', i, 'w')
print 'writing temporary APBS input file'
if(i < 9):
istr = '0000' + str(i + 1)
elif(i < 99):
istr = '000' + str(i + 1)
elif(i < 999):
istr = '00' + str(i + 1)
elif(i < 9999):
istr = '0' + str(i + 1)
elif(i < 99999):
istr = str(i + 1)
else:
print 'wow, man!'
istr = str(i + 1)
thisdcd = path + 'min_' + istr + '.dcd'
if(manual_flag == 0):
inputfilename = 'junk.in'
write_apbs_input(maximum_dimensions, temperature,
inputfilename, ion_charge, ion_conc, ion_radius)
else:
inputfilename = manual_file
print 'starting apbs calculation ( nfiles = ', nf, ')'
ttime = time.ctime()
runstring = vers + ' : ' + outfile + ' run stated at : ' + ttime
print runstring
ncpu = 1
bin_path = sasconfig._bin_path
if(ncpu == 1):
print 'starting pdb2pqr calculation number: ', istr
#run_pdb2pqr = 'python /usr/local/bin/pdb2pqr/pdb2pqr.py --ff=charmm --with-ph='+str(ph)+' -v '+path+'junk.pdb junk.pqr >& pdb2pqr.out'
run_pdb2pqr = 'python ' + bin_path + 'pdb2pqr.py --ff=charmm --with-ph=' + \
str(ph) + ' -v ' + path + 'junk.pdb junk.pqr >& pdb2pqr.out'
os.system(run_pdb2pqr)
print 'starting apbs calculation number: ', istr
#nst='/usr/local/bin/apbs junk.in >& junk.out &'
nst = bin_path + '/apbs junk.in >& junk.out &'
p = subprocess.Popen(nst, shell=True, executable='/bin/bash')
sts = os.waitpid(p.pid, 0)[1]
print 'p.pid = ', p.pid
thisjob = str(int(p.pid) + 1)
run = 1
esteps = 0
while(run == 1):
# time.sleep(5)
lsst = 'ls junk.out | grep -c "junk.out" '
lsfile = os.popen(lsst, 'r').readlines()
stls = string.split(lsfile[0])
nstls = locale.atoi(stls[0])
if(nstls > 0):
tout2 = os.popen(
'tail -15 junk.out | grep "Thanks for using"', 'r').readlines()
if(len(tout2) > 0):
print 'finished apbs calculation'
run = 0
fraction_done = (float(i + 1) / float(nf))
progress_string = 'COMPLETED ' + \
str(i + 1) + ' of ' + str(nf) + ' : ' + \
str(fraction_done * 100.0) + ' % done'
print('%s\n' % progress_string)
print('%s\n' % progress_string)
report_string = 'STATUS\t' + str(fraction_done)
txtOutput.put(report_string)
print 'finished run'
mvst = 'mv io.mc ' + path + 'apbs_' + istr + '_io.mc'
os.system(mvst)
mvst = 'mv pot.dx ' + path + 'apbs_' + istr + '_pot.dx.mc'
os.system(mvst)
mvst = 'mv pdb2pqr.out ' + path + 'apbs_' + istr + '_pdb2pqr.dat'
os.system(mvst)
mvst = 'mv ' + path + 'junk.pdb ' + path + 'apbs_' + istr + '.pdb'
os.system(mvst)
mvst = 'mv junk.out ' + path + 'apbs_' + istr + '.out'
os.system(mvst)
mvst = 'mv junk.pqr ' + path + 'apbs_' + istr + '.pqr'
os.system(mvst)
mvst = 'mv junk.propka ' + path + 'apbs_' + istr + '.propka'
os.system(mvst)
# mvst = 'mv junk-input.p '+path+'apbs_input.p.'+istr+'.pqr'
# os.system(mvst)
mvst = 'mv junk.in ' + path + 'apbs_' + istr + '.in'
os.system(mvst)
#os.system('mv energy_results.out '+path+'energy_results_'+istr+'.out')
if(infiletype == 'dcd'):
m1.close_dcd_read(dcdfile[0])
txtOutput.put("Total number of frames = %d\n\n" % (nf))
txtOutput.put("output energies saved to : %s\n" % ('./' + path))
txtOutput.put("\n%s \n" % (st))
time.sleep(0.5)
print 'APBS IS DONE'
return()
if __name__ == '__main__':
# BEGIN USER EDIT
# BEGIN USER EDIT
# BEGIN USER EDIT
runname = 'run_0'
pdbfile = 'ten_mer.pdb'
infile = 'ten_mer_two_frames.dcd'
outfile = 'apbs.dat'
ph = '5.5'
temperature = '300.0'
ion_conc = '0.15'
ion_charge = '1.0'
ion_radius = '1.62'
manual_flag = '0'
manual_file = 'test_input_file.txt'
# END USER EDIT
# END USER EDIT
# END USER EDIT
svariables = {}
svariables['runname'] = (runname, 'string')
svariables['infile'] = (infile, 'string')
svariables['pdbfile'] = (pdbfile, 'string')
svariables['outfile'] = (outfile, 'string')
svariables['ph'] = (ph, 'float')
svariables['temperature'] = (temperature, 'float')
svariables['ion_charge'] = (ion_charge, 'float')
svariables['ion_conc'] = (ion_conc, 'float')
svariables['ion_radius'] = (ion_radius, 'float')
svariables['manual_flag'] = (manual_flag,'int' )
svariables['manual_file'] = (manual_file, 'string')
import sassie.interface.input_filter as input_filter
error, variables = input_filter.type_check_and_convert(svariables)
if(len(error) > 0):
print 'error = ', error
sys.exit()
runname = variables['runname'][0]
import multiprocessing
import shutil
import os
if os.path.exists(runname + '/apbs'):
shutil.rmtree(runname + '/apbs')
txtQueue = multiprocessing.JoinableQueue()
apbs_driver(variables, txtQueue)
|
gpl-3.0
| -7,442,169,570,840,521,000
| 31.688654
| 147
| 0.566309
| false
| 3.255979
| false
| false
| false
|
wurstmineberg/systemd-minecraft
|
minecraft/__init__.py
|
1
|
39178
|
#!/usr/bin/env python3
"""Systemd init script for one or more vanilla Minecraft servers.
Usage:
minecraft [options] (start | stop | kill | restart | status | backup) [<world>...]
minecraft [options] (update | revert) [<world> [snapshot <snapshot-id> | <version>]]
minecraft [options] saves (on | off) [<world>...]
minecraft [options] update-all [snapshot <snapshot-id> | <version>]
minecraft [options] command <world> [--] <command>...
minecraft -h | --help
minecraft --version
Options:
-h, --help Print this message and exit.
--all Apply the action to all configured worlds.
--config=<config> Path to the config file [default: /opt/wurstmineberg/config/systemd-minecraft.json].
--enabled Apply the action to all enabled worlds. This option is intended to be used only by the service file, to automatically start all enabled worlds on boot.
--main Apply the action to the main world. This is the default.
--no-backup Don't back up the world(s) before updating/reverting.
--version Print version info and exit.
"""
import sys
sys.path.append('/opt/py')
import contextlib
import datetime
import docopt
import errno
import gzip
import json
import loops
import mcrcon
import more_itertools
import os
import signal
import os.path
import pathlib
import pwd
import re
import requests
import shutil
import socket
import subprocess
import threading
import time
import urllib.parse
try:
from minecraft.version import __version__
except ImportError:
__version__ = None
from wmb import get_config, from_assets
CONFIG = get_config("systemd-minecraft", base = from_assets(__file__))
if __name__ == '__main__':
arguments = docopt.docopt(__doc__, version='Minecraft init script {}'.format(__version__))
for key in CONFIG['paths']:
if isinstance(CONFIG['paths'][key], str):
CONFIG['paths'][key] = pathlib.Path(CONFIG['paths'][key])
class World:
def __init__(self, name=None):
if name is None:
name = CONFIG['mainWorld']
if name in CONFIG['worlds']:
self.name = name
else:
raise ValueError('no such world')
def __repr__(self):
return 'minecraft.World({!r})'.format(self.name)
def __str__(self):
return self.name
def backup(self, announce=False, reply=print, path=None, *, copy_to_latest=None):
"""Back up the Minecraft world.
Optional arguments:
announce -- Whether to announce in-game that saves are being disabled/reenabled. Defaults to False.
reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
path -- Where the backup will be saved. The file extension .tar.gz will be appended automatically. Defaults to a file with the world name and a timestamp in the backups directory.
Keyword-only arguments:
copy_to_latest -- Whether to create or update the copy of the world directory at backups/latest. Defaults to True for the main world and to False for all other worlds.
Returns:
A pathlib.Path representing the gzipped backup tarball.
"""
if copy_to_latest is None:
copy_to_latest = self.is_main
self.save_off(announce=announce, reply=reply)
if path is None:
path = str(self.backup_path / '{}_{:%Y-%m-%d_%Hh%M}'.format(self.name, datetime.datetime.utcnow()))
else:
path = str(path)
backup_file = pathlib.Path(path + '.tar')
reply('Backing up minecraft world...')
if not backup_file.parent.exists():
# make sure the backup directory exists
backup_file.parent.mkdir(parents=True)
subprocess.call(['tar', '-C', str(self.path), '-cf', str(backup_file), self.world_path.name]) # tar the world directory (e.g. /opt/wurstmineberg/world/wurstmineberg/world or /opt/wurstmineberg/world/wurstmineberg/wurstmineberg)
if copy_to_latest:
# make a copy of the world directory for the main world to be used by map rendering
subprocess.call(['rsync', '-av', '--delete', str(self.world_path) + '/', str(self.backup_path / 'latest')])
self.save_on(announce=announce, reply=reply)
reply('Compressing backup...')
subprocess.call(['gzip', '-f', str(backup_file)])
backup_file = pathlib.Path(str(backup_file) + '.gz')
if self.is_main and CONFIG['paths']['backupWeb'] is not None:
reply('Symlinking to httpdocs...')
if CONFIG['paths']['backupWeb'].is_symlink():
CONFIG['paths']['backupWeb'].unlink()
CONFIG['paths']['backupWeb'].symlink_to(backup_file)
reply('Done.')
return backup_file
@property
def backup_path(self):
return CONFIG['paths']['backup'] / self.name
def command(self, cmd, args=[], block=False):
"""Send a command to the server.
Required arguments:
cmd -- The command name.
Optional arguments:
args -- A list of arguments passed to the command.
block -- If True and the server is not running, tries to wait until the server is running to send the command. Defaults to False.
Raises:
MinecraftServerNotRunningError -- If the world is not running and block is set to False.
socket.error -- If the world is running but the RCON connection failed.
"""
while not self.status():
if block:
time.sleep(1)
else:
raise MinecraftServerNotRunningError('')
cmd += (' ' + ' '.join(str(arg) for arg in args)) if len(args) else ''
rcon = mcrcon.MCRcon()
rcon.connect('localhost', self.config['rconPort'], self.config['rconPassword'])
return rcon.command(cmd)
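    # A minimal sketch of driving the command() method above over RCON (the
    # world name and commands are illustrative; the server must be running and
    # have rconPort/rconPassword configured):
    #
    #     world = World('wurstmineberg')
    #     players = world.command('list')            # returns the server's reply string
    #     world.command('say', ['Backup starting'])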
def cleanup(self, reply=print):
if self.pidfile_path.exists():
reply("Removing PID file...")
self.pidfile_path.unlink()
if self.socket_path.exists():
reply("Removing socket file...")
self.socket_path.unlink()
@property
def config(self):
ret = {
'customServer': CONFIG['worlds'][self.name].get('customServer', False),
'enabled': CONFIG['worlds'][self.name].get('enabled', False),
'javaOptions': CONFIG['javaOptions'].copy(),
'rconPassword': CONFIG['worlds'][self.name].get('rconPassword'),
'rconPort': CONFIG['worlds'][self.name].get('rconPort', 25575),
'whitelist': CONFIG['whitelist'].copy()
}
ret['javaOptions'].update(CONFIG['worlds'][self.name].get('javaOptions', {}))
ret['whitelist'].update(CONFIG['worlds'][self.name].get('whitelist', {}))
return ret
@property
def is_main(self):
return self.name == CONFIG['mainWorld']
def iter_update(self, version=None, snapshot=False, *, reply=print, log_path=None, make_backup=True, override=None):
"""Download a different version of Minecraft and restart the world if it is running. Returns a generator where each iteration performs one step of the update process.
Optional arguments:
version -- If given, a version with this name will be downloaded. By default, the newest available version is downloaded.
snapshot -- If version is given, this specifies whether the version is a development version. If no version is given, this specifies whether the newest stable version or the newest development version should be downloaded. Defaults to False.
Keyword-only arguments:
log_path -- This is passed to the stop and start functions if the server is stopped before the update.
make_backup -- Whether to back up the world before updating. Defaults to True.
override -- If this is true and the server jar for the target version already exists, it will be deleted and redownloaded. Defaults to True if the target version is the current version, False otherwise.
reply -- This function is called several times with a string argument representing update progress. Defaults to the built-in print function.
"""
# get version
versions_json = requests.get('https://launchermeta.mojang.com/mc/game/version_manifest.json').json()
if version is None: # try to dynamically get the latest version number from assets
version = versions_json['latest']['snapshot' if snapshot else 'release']
elif snapshot:
version = datetime.datetime.utcnow().strftime('%yw%V') + version
for version_dict in versions_json['versions']:
if version_dict.get('id') == version:
snapshot = version_dict.get('type') == 'snapshot'
break
else:
reply('Minecraft version not found in assets, will try downloading anyway')
version_dict = None
version_text = 'Minecraft {} {}'.format('snapshot' if snapshot else 'version', version)
yield {
'version': version,
'is_snapshot': snapshot,
'version_text': version_text
}
old_version = self.version()
if override is None:
override = version == old_version
if version_dict is not None and 'url' in version_dict:
version_json = requests.get(version_dict['url']).json()
else:
version_json = None
# back up world in background
if make_backup:
backup_path = self.backup_path / 'pre-update' / '{}_{:%Y-%m-%d_%Hh%M}_{}_{}'.format(self.name, datetime.datetime.utcnow(), old_version, version)
backup_thread = threading.Thread(target=self.backup, kwargs={'reply': reply, 'path': backup_path})
backup_thread.start()
# get server jar
jar_path = CONFIG['paths']['jar'] / 'minecraft_server.{}.jar'.format(version)
if override and jar_path.exists():
jar_path.unlink()
if not jar_path.exists():
_download('https://s3.amazonaws.com/Minecraft.Download/versions/{0}/minecraft_server.{0}.jar'.format(version), local_filename=str(jar_path))
# get client jar
if 'clientVersions' in CONFIG['paths']:
with contextlib.suppress(FileExistsError):
(CONFIG['paths']['clientVersions'] / version).mkdir(parents=True)
_download('https://s3.amazonaws.com/Minecraft.Download/versions/{0}/{0}.jar'.format(version) if version_json is None else version_json['downloads']['client']['url'], local_filename=str(CONFIG['paths']['clientVersions'] / version / '{}.jar'.format(version)))
# wait for backup to finish
if make_backup:
yield 'Download finished. Waiting for backup to finish...'
backup_thread.join()
yield 'Backup finished. Stopping server...'
else:
yield 'Download finished. Stopping server...'
# stop server
was_running = self.status()
if was_running:
self.say('Server will be upgrading to ' + version_text + ' and therefore restart')
time.sleep(5)
self.stop(reply=reply, log_path=log_path)
yield 'Server stopped. Installing new server...'
# install new server
if self.service_path.exists():
self.service_path.unlink()
self.service_path.symlink_to(CONFIG['paths']['jar'] / 'minecraft_server.{}.jar'.format(version))
client_jar_path = CONFIG['paths']['home'] / 'home' / 'client.jar'
# update Mapcrafter textures
if self.is_main:
if client_jar_path.exists():
client_jar_path.unlink()
client_jar_path.symlink_to(CONFIG['paths']['clientVersions'] / version / '{}.jar'.format(version))
if CONFIG['updateMapcrafterTextures']:
try:
subprocess.check_call(['mapcrafter_textures.py', str(CONFIG['paths']['clientVersions'] / version / '{}.jar'.format(version)), '/usr/local/share/mapcrafter/textures'])
except Exception as e:
reply('Error while updating mapcrafter textures: {}'.format(e))
# restart server
if was_running:
self.start(reply=reply, start_message='Server updated. Restarting...', log_path=log_path)
def kill(self, reply=print):
"""Kills a non responding minecraft server using the PID saved in the PID file."""
with self.pidfile_path.open("r") as pidfile:
pid = int(pidfile.read())
reply("World '" + self.name + "': Sending SIGTERM to PID " + str(pid) + " and waiting 60 seconds for shutdown...")
try:
os.kill(pid, signal.SIGTERM)
for _ in range(60):
live = self.pidrunning(pid)
if not live:
reply("Terminated world '" + self.name + "'")
break
time.sleep(1)
else:
reply("Could not terminate with SIGQUIT. Sending SIGKILL to PID " + str(pid) + "...")
os.kill(pid, signal.SIGKILL)
except ProcessLookupError:
reply("Process does not exist. Cleaning up...")
finally:
self.cleanup(reply)
return not self.status()
@property
def path(self):
return CONFIG['paths']['worlds'] / self.name
@property
def pid(self):
try:
with self.pidfile_path.open("r") as pidfile:
return int(pidfile.read())
except FileNotFoundError:
return None
def pidrunning(self, pid):
try:
os.kill(pid, 0)
return True
except ProcessLookupError:
return False
except PermissionError:
# Process exists but you can't send signals
return True
def pidstatus(self, reply=print):
if self.pidfile_path.exists() and self.pid is not None:
if self.pidrunning(self.pid):
return True
elif self.pidfile_path.exists():
reply("PID file exists but process is terminated. Cleaning up...")
self.cleanup(reply)
return False
@property
def pidfile_path(self):
return CONFIG['paths']['pidfiles'] / (self.name + ".pid")
def restart(self, *args, **kwargs):
reply = kwargs.get('reply', print)
if not self.stop(*args, **kwargs):
return False
kwargs['start_message'] = kwargs.get('start_message', 'Server stopped. Restarting...')
return self.start(*args, **kwargs)
def revert(self, path_or_version=None, snapshot=False, *, log_path=None, make_backup=True, override=False, reply=print):
"""Revert to a different version of Minecraft and restore a pre-update backup.
Optional arguments:
path_or_version -- If given, a pathlib.Path pointing at the backup file to be restored, or the Minecraft version to which to restore. By default, the newest available pre-update backup is restored.
snapshot -- If true, single-letter Minecraft versions will be expanded to include the current year and week number. Defaults to False.
Keyword-only arguments:
log_path -- This is passed to the stop function if the server is stopped before the revert.
make_backup -- Whether to back up the world before reverting. Defaults to True.
override -- If this is True and the server jar for the target version already exists, it will be deleted and redownloaded. Defaults to False.
reply -- This function is called several times with a string argument representing revert progress. Defaults to the built-in print function.
"""
# determine version and backup path
if path_or_version is None:
path = sorted((self.backup_path / 'pre-update').iterdir(), key=lambda path: path.stat().st_mtime, reverse=True)[0] # latest pre-update backup
version = path.name.split('_')[3]
elif isinstance(path_or_version, pathlib.Path):
path = path_or_version
version = path.name.split('_')[3]
else:
version = path_or_version
if snapshot and len(version) == 1:
version = datetime.datetime.utcnow().strftime('%yw%V') + version
path = next(path for path in sorted((self.backup_path / 'pre-update').iterdir(), key=lambda path: path.stat().st_mtime, reverse=True) if path.name.split('_')[3] == version)
# start iter_update
update_iterator = self.iter_update(version, log_path=log_path, make_backup=False, override=override, reply=reply)
version_dict = next(update_iterator)
reply('Downloading ' + version_dict['version_text'])
# make a backup to backup/<world>/reverted
if make_backup:
old_version = self.version()
backup_path = self.backup_path / 'reverted' / '{}_{:%Y-%m-%d_%Hh%M}_{}_{}'.format(self.name, datetime.datetime.utcnow(), old_version, version)
self.backup(reply=reply, path=backup_path, copy_to_latest=False)
# stop the server
was_running = self.status()
if was_running:
self.say('Server will be reverting to ' + version_dict["version_text"] + ' and therefore restart')
time.sleep(5)
self.stop(reply=reply, log_path=log_path)
reply('Server stopped. Restoring backup...')
# revert Minecraft version
for message in update_iterator:
reply(message)
# restore backup
world_path = self.world_path
if world_path.exists():
shutil.rmtree(str(world_path))
        subprocess.call(['tar', '-C', str(self.path), '-xzf', str(path), world_path.name]) # untar the world backup
# restart server
if was_running:
self.start(reply=reply, start_message='Server reverted. Restarting...', log_path=log_path)
return version_dict['version'], version_dict['is_snapshot'], version_dict['version_text']
def save_off(self, announce=True, reply=print):
"""Turn off automatic world saves, then force-save once.
Optional arguments:
announce -- Whether to announce in-game that saves are being disabled.
reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
"""
if self.status():
reply('Minecraft is running... suspending saves')
if announce:
self.say('Server backup starting. Server going readonly...')
self.command('save-off')
self.command('save-all')
time.sleep(10)
os.sync()
else:
reply('Minecraft is not running. Not suspending saves.')
def save_on(self, announce=True, reply=print):
"""Enable automatic world saves.
Optional arguments:
announce -- Whether to announce in-game that saves are being enabled.
reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
"""
if self.status():
reply('Minecraft is running... re-enabling saves')
self.command('save-on')
if announce:
self.say('Server backup ended. Server going readwrite...')
else:
reply('Minecraft is not running. Not resuming saves.')
def say(self, message, prefix=True):
"""Broadcast a message in the world's in-game chat. This is a simple wrapper around the /say and /tellraw commands.
Required arguments:
message -- The message to display in chat.
Optional arguments:
prefix -- If False, uses /tellraw instead of /say to send a message without the [server] prefix. Defaults to True.
"""
if prefix:
self.command('say', [message])
else:
self.tellraw(message)
@property
def service_path(self):
return self.path / CONFIG['paths']['service']
@property
def socket_path(self):
return CONFIG['paths']['sockets'] / self.name
def start(self, *args, **kwargs):
def feed_commands(java_popen):
"""This function will run a loop to feed commands sent through the socket to minecraft"""
mypid = os.getpid()
loop_var = True
with socket.socket(socket.AF_UNIX) as s:
# Set 1 minute timeout so that the process actually exits (this is not crucial but we don't want to spam the system)
s.settimeout(60)
if self.socket_path.exists():
self.socket_path.unlink()
s.bind(str(self.socket_path))
while loop_var and self.socket_path.exists():
if not self.pidrunning(java_popen.pid):
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except:
pass
return
str_buffer = ''
try:
s.listen(1)
c, _ = s.accept()
while loop_var:
data = c.recv(4096)
if not data:
break
lines = (str_buffer + data.decode('utf-8')).split('\n')
for line in lines[:-1]:
if line == 'stop':
loop_var = False
break
java_popen.stdin.write(line.encode('utf-8') + b'\n')
java_popen.stdin.flush()
str_buffer = lines[-1]
try:
c.shutdown(socket.SHUT_RDWR)
c.close()
except:
pass
except (socket.timeout, socket.error):
continue
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except:
pass
java_popen.communicate(input=b'stop\n')
if self.socket_path.exists():
self.socket_path.unlink()
invocation = [
'java',
'-Xmx' + str(self.config['javaOptions']['maxHeap']) + 'M',
'-Xms' + str(self.config['javaOptions']['minHeap']) + 'M',
'-XX:+UseConcMarkSweepGC',
'-XX:ParallelGCThreads=' + str(self.config['javaOptions']['cpuCount']),
'-XX:+AggressiveOpts',
'-Dlog4j.configurationFile=' + str(CONFIG['paths']['logConfig']),
'-jar',
str(CONFIG['paths']['service'])
] + self.config['javaOptions']['jarOptions']
reply = kwargs.get('reply', print)
if self.status():
reply('Server is already running!')
return False
reply(kwargs.get('start_message', 'Starting Minecraft server...'))
if not self.socket_path.parent.exists():
# make sure the command sockets directory exists
self.socket_path.parent.mkdir(parents=True)
if not self.pidfile_path.parent.exists():
# make sure the pidfile directory exists
self.pidfile_path.parent.mkdir(parents=True)
java_popen = subprocess.Popen(invocation, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=str(self.path)) # start the java process
with self.pidfile_path.open("w+") as pidfile:
pidfile.write(str(java_popen.pid))
for line in loops.timeout_total(java_popen.stdout, datetime.timedelta(seconds=CONFIG['startTimeout'])): # wait until the timeout has been exceeded...
if re.match('[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} \\[Server thread/INFO\\]: Done \\([0-9]+.[0-9]+s\\)!', line.decode('utf-8')): # ...or the server has finished starting
break
_fork(feed_commands, java_popen) # feed commands from the socket to java
_fork(more_itertools.consume, java_popen.stdout) # consume java stdout to prevent deadlocking
if kwargs.get('log_path'):
with (kwargs['log_path'].open('a') if hasattr(kwargs['log_path'], 'open') else open(kwargs['log_path'], 'a')) as logins_log:
ver = self.version()
print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + (' @restart' if ver is None else ' @start ' + ver), file=logins_log) # logs in UTC
# Wait for the socket listener to spin up
for _ in range(20):
if not self.status():
time.sleep(0.5)
else:
break
return self.status()
def status(self, reply=print):
return self.pidstatus(reply=reply) and self.socket_path.exists()
def stop(self, *args, **kwargs):
reply = kwargs.get('reply', print)
if self.status():
try:
reply('SERVER SHUTTING DOWN IN 10 SECONDS. Saving map...')
notice = kwargs.get('notice', 'SERVER SHUTTING DOWN IN 10 SECONDS. Saving map...')
if self.config['rconPassword'] is None:
reply('Cannot communicate with the world, missing RCON password! Killing...')
return self.kill()
if notice is not None:
self.say(str(notice))
self.command('save-all')
time.sleep(10)
self.command('stop')
time.sleep(7)
for _ in range(12):
if self.status():
time.sleep(5)
continue
else:
break
else:
reply('The server could not be stopped! Killing...')
return self.kill()
if kwargs.get('log_path'):
with (kwargs['log_path'].open('a') if hasattr(kwargs['log_path'], 'open') else open(kwargs['log_path'], 'a')) as logins_log:
print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ' @stop', file=logins_log) # logs in UTC
except ConnectionRefusedError:
reply("Can't communicate with the socket. We need to kill the server...")
return self.kill()
else:
reply('Minecraft server was not running.')
self.cleanup(reply=reply)
return not self.status()
def tellraw(self, message_dict, player='@a'):
if isinstance(message_dict, str):
message_dict = {'text': message_dict}
elif isinstance(message_dict, list):
message_dict = {'text': '', 'extra': message_dict}
try:
import api.util2
except ImportError:
pass # no support for Player objects
else:
if isinstance(player, api.util2.Player):
player = player.data['minecraft']['nicks'][-1]
self.command('tellraw', [player, json.dumps(message_dict)])
def update(self, version=None, snapshot=False, *, log_path=None, make_backup=True, override=False, reply=print):
"""Download a different version of Minecraft and restart the server if it is running.
Optional arguments:
version -- If given, a version with this name will be downloaded. By default, the newest available version is downloaded.
snapshot -- If version is given, this specifies whether the version is a development version. If no version is given, this specifies whether the newest stable version or the newest development version should be downloaded. Defaults to False.
Keyword-only arguments:
log_path -- This is passed to the stop function if the server is stopped before the update.
make_backup -- Whether to back up the world before updating. Defaults to True.
override -- If this is True and the server jar for the target version already exists, it will be deleted and redownloaded. Defaults to False.
reply -- This function is called several times with a string argument representing update progress. Defaults to the built-in print function.
Returns:
The new version, a boolean indicating whether or not the new version is a snapshot (or pre-release), and the full name of the new version.
Raises:
NotImplementedError -- For worlds with custom servers.
"""
if self.config['customServer']:
raise NotImplementedError('Update is not implemented for worlds with custom servers')
update_iterator = self.iter_update(version=version, snapshot=snapshot, log_path=log_path, make_backup=make_backup, override=override, reply=reply)
version_dict = next(update_iterator)
reply('Downloading ' + version_dict['version_text'])
for message in update_iterator:
reply(message)
return version_dict['version'], version_dict['is_snapshot'], version_dict['version_text']
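    # Illustrative usage sketch, not part of the original module. Assumes the
    # world does not use a custom server:
    #
    #     world = World('wurstmineberg')
    #     version, is_snapshot, version_text = world.update('1.8.9')
    #     # e.g. ('1.8.9', False, 'Minecraft version 1.8.9')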
def update_whitelist(self, people_file=None):
# get wanted whitelist from people file
if people_file is None:
people = people.get_people_db().obj_dump(version=3)
else:
with open(str(people_file)) as people_fobj:
people = json.load(people_fobj)['people']
whitelist = []
additional = self.config['whitelist']['additional']
if not self.config['whitelist']['ignorePeople']:
for person in people:
if not ('minecraft' in person or 'minecraftUUID' in person):
continue
if person.get('status', 'later') not in ['founding', 'later', 'postfreeze']:
continue
if person.get('minecraftUUID'):
uuid = person['minecraftUUID'] if isinstance(person['minecraftUUID'], str) else format(person['minecraftUUID'], 'x')
if 'minecraft' in person:
name = person['minecraft']
else:
name = requests.get('https://api.mojang.com/user/profiles/{}/names'.format(uuid)).json()[-1]['name']
else:
response_json = requests.get('https://api.mojang.com/users/profiles/minecraft/{}'.format(person['minecraft'])).json()
uuid = response_json['id']
name = response_json['name']
if '-' not in uuid:
uuid = uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:]
whitelist.append({
'name': name,
'uuid': uuid
})
# write whitelist
whitelist_path = self.path / 'whitelist.json'
with whitelist_path.open('a'):
os.utime(str(whitelist_path), None) # touch the file
with whitelist_path.open('w') as whitelist_json:
json.dump(whitelist, whitelist_json, sort_keys=True, indent=4, separators=(',', ': '))
# apply changes to whitelist files
self.command('whitelist', ['reload'])
# add people with unknown UUIDs to new whitelist using the command
for name in additional:
self.command('whitelist', ['add', name])
# update people file
try:
import lazyjson
except ImportError:
return
try:
with whitelist_path.open() as whitelist_json:
whitelist = json.load(whitelist_json)
except ValueError:
return
people = lazyjson.File(CONFIG['paths']['people'])
for whitelist_entry in whitelist:
for person in people['people']:
if person.get('minecraftUUID') == whitelist_entry['uuid']:
if 'minecraft' in person and person.get('minecraft') != whitelist_entry['name'] and person.get('minecraft') not in person.get('minecraft_previous', []):
if 'minecraft_previous' in person:
person['minecraft_previous'].append(person['minecraft'])
else:
person['minecraft_previous'] = [person['minecraft']]
person['minecraft'] = whitelist_entry['name']
elif person.get('minecraft') == whitelist_entry['name'] and 'minecraftUUID' not in person:
person['minecraftUUID'] = whitelist_entry['uuid']
def version(self):
"""Returns the version of Minecraft the world is currently configured to run. For worlds with custom servers, returns None instead.
"""
if self.config['customServer']:
return None
return self.service_path.resolve().stem[len('minecraft_server.'):]
@property
def world_path(self):
"""Returns the world save directory"""
result = self.path / 'world'
if not result.exists():
return self.path / self.name
return result
class MinecraftServerNotRunningError(Exception):
pass
def _command_output(cmd, args=[]):
p = subprocess.Popen([cmd] + args, stdout=subprocess.PIPE)
out, _ = p.communicate()
return out.decode('utf-8')
def _download(url, local_filename=None): #FROM http://stackoverflow.com/a/16696317/667338
if local_filename is None:
local_filename = url.split('#')[0].split('?')[0].split('/')[-1]
if local_filename == '':
raise ValueError('no local filename specified')
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def _fork(func, *args, **kwargs):
#FROM http://stackoverflow.com/a/6011298/667338
# do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177)
try:
pid = os.fork()
if pid > 0:
# parent process, return and keep running
return
except OSError as e:
print('fork #1 failed: %d (%s)' % (e.errno, e.strerror), file=sys.stderr)
sys.exit(1)
os.setsid()
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as e:
print('fork #2 failed: %d (%s)' % (e.errno, e.strerror), file=sys.stderr)
sys.exit(1)
with open(os.path.devnull) as devnull:
sys.stdin = devnull
sys.stdout = devnull
func(*args, **kwargs) # do stuff
os._exit(os.EX_OK) # all done
def worlds():
"""Iterates over all configured worlds."""
for world_name in CONFIG['worlds'].keys():
yield World(world_name)
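# Illustrative helper, not part of the original script: one way the worlds()
# generator above could be used to print a one-line status summary per world.
# It assumes CONFIG has been loaded exactly as elsewhere in this module.
def _example_world_summary():
    quiet = lambda *args, **kwargs: None  # suppress progress output from status()
    for world in worlds():
        state = 'running' if world.status(reply=quiet) else 'stopped'
        print('{}: {} ({})'.format(world, state, world.version() or 'custom server'))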
if __name__ == '__main__':
    expect_user = CONFIG["runUser"]
    try:
        wurstmineberg_user = pwd.getpwnam(expect_user)
    except KeyError:
        sys.exit('[!!!!] User ‘{}’ does not exist!'.format(expect_user))
if os.geteuid() != wurstmineberg_user.pw_uid:
sys.exit('[!!!!] Only the user ‘{}’ may use this program!'.format(expect_user))
if arguments['--all'] or arguments['update-all']:
selected_worlds = worlds()
elif arguments['--enabled']:
selected_worlds = filter(lambda world: world.config['enabled'], worlds())
elif arguments['<world>']:
selected_worlds = (World(world_name) for world_name in arguments['<world>'])
else:
selected_worlds = [World()]
if arguments['kill']:
for world in selected_worlds:
if world.pidstatus():
world.kill()
else:
sys.exit('[WARN] Could not kill the "{}" world, PID file does not exist.'.format(world))
elif arguments['start']:
for world in selected_worlds:
if not world.start():
sys.exit('[FAIL] Error! Could not start the {} world.'.format(world))
else:
print('[ ok ] Minecraft is now running.')
elif arguments['stop']:
for world in selected_worlds:
if not world.stop():
sys.exit('[FAIL] Error! Could not stop the {} world.'.format(world))
else:
print('[ ok ] Minecraft is stopped.')
elif arguments['restart']:
for world in selected_worlds:
if not world.restart():
sys.exit('[FAIL] Error! Could not restart the {} world.'.format(world))
else:
print('[ ok ] Minecraft is now running.')
elif arguments['update'] or arguments['update-all']:
for world in selected_worlds:
if arguments['snapshot']:
world.update(arguments['<snapshot-id>'], snapshot=True, make_backup=not arguments['--no-backup'])
elif arguments['<version>']:
world.update(arguments['<version>'], make_backup=not arguments['--no-backup'])
else:
world.update(snapshot=True)
elif arguments['revert']:
for world in selected_worlds:
if arguments['snapshot']:
world.revert(arguments['<snapshot-id>'], snapshot=True, make_backup=not arguments['--no-backup'])
elif arguments['<version>']:
world.revert(arguments['<version>'], make_backup=not arguments['--no-backup'])
else:
world.revert()
elif arguments['backup']:
for world in selected_worlds:
world.backup()
elif arguments['status']:
exit1 = False
for world in selected_worlds:
mcversion = "" if world.version() == "" else "(Minecraft {}) ".format(world.version())
if world.status():
print('[info] The "{}" world {}is running with PID {}.'.format(world, mcversion, world.pid))
else:
exit1 = True
if world.pidstatus():
print('[info] The "{}" world is running but the socket file does not exist. Please kill the world and restart.'.format(world))
else:
print('[info] The "{}" world {}is not running.'.format(world, mcversion))
if exit1:
sys.exit(1)
elif arguments['command']:
selected_worlds = list(selected_worlds)
for world in selected_worlds:
if len(selected_worlds) > 1:
print('[info] running command on {} world'.format(world))
cmdlog = world.command(arguments['<command>'][0], arguments['<command>'][1:])
for line in cmdlog.splitlines():
print(str(line))
elif arguments['saves']:
for world in selected_worlds:
if arguments['on']:
world.save_on()
elif arguments['off']:
world.save_off()
else:
raise NotImplementedError('Subcommand not implemented')
else:
raise NotImplementedError('Subcommand not implemented')
|
mit
| -1,580,113,770,325,115,400
| 45.52019
| 269
| 0.584146
| false
| 4.257146
| true
| false
| false
|
davidoj/RL_Aggregation
|
Agents.py
|
1
|
8730
|
'''
Reinforcement learning agents.
David Johnston 2015
'''
import numpy as np
import collections
import numbers
import random
random.seed(1)
class OnlineAgent:
"""
Generic online agent class; executes e-greedy policy, looks up values
"""
def __init__(self,problem,epsilon=1e-1,tiles=False):
self.epsilon = epsilon
self.problem = problem
self.qValues = problem.getZeroQTable()
self.reset = self.problem.reset
if tiles:
self.getQValue = self.getQTile
else:
self.getQValue = self.getQDisc
    def executePolicy(self, state, tiebreak='first'):
qs = self.getQArray(state)
test = random.random()
if test < self.epsilon:
return random.choice(range(len(qs)))
elif tiebreak == 'first':
return np.where(qs==max(qs))[0][0]
elif tiebreak == 'random':
return random.choice(np.where(qs==max(qs))[0])
def episode(self,deltaMin=1e-3,timeout=int(1e5),decayAlpha=True):
'''
Runs an episode, updates q-values and returns the length of the episode.
'''
for i in range(timeout):
currentState = self.problem.getAgentState()
action = self.executePolicy(currentState)
self.preUpdate(currentState,action)
if self.problem.isEpisodic:
terminal, nextState, reward = self.problem.result(action)
if terminal:
self.update(currentState,nextState,action,reward,decayAlpha,
terminal=1)
self.problem.reset()
return i
else:
nextState, reward = self.problem.result(action)
self.update(currentState,nextState,action,reward,decayAlpha)
return i
def run_n_episodes(self,n,decayAlpha=False,timeout=int(1e5)):
e_lengths = []
e_avgs = np.zeros(int(np.log2(n)))
j = 1
for i in range(n):
l = self.episode(timeout=timeout,decayAlpha=decayAlpha)
if l<timeout:
e_lengths.append(l)
if i == 2**j:
                    s = min(1000, (len(e_lengths) + 1) // 2)
e_avgs[j-1]= np.average(e_lengths[-s:-1])
print(np.average(e_lengths[-s:-1]))
j += 1
else:
e_lengths.append(timeout)
self.reset()
print("Episode timed out {}".format(l))
return e_avgs
def getQDisc(self,state,action):
return self.qValues[state,action]
def getQTile(self,state,action):
return sum(self.qValues[state,action])
def getValue(self,state):
qValues = self.getQArray(state)
return max(qValues)
def getQArray(self,state):
return np.array([self.getQValue(state,a) for a in self.problem.actions])
class QAgent(OnlineAgent):
"""
Q-learning agent
"""
def __init__(self,problem,alpha=1e-1,
epsilon=1e-1):
OnlineAgent.__init__(self,problem,epsilon=epsilon)
self.alpha = problem.setAlpha(alpha)
self.counter = problem.getZeroQTable()
def update(self,state,nextState,action,reward,decayAlpha,terminal=0):
'''
Q-learning update. State is either an integer or list(array) of integers
'''
if terminal:
nextV = 0
else:
nextV = self.getValue(nextState)
currentQV = self.getQValue(state,action)
delta = reward - currentQV + self.problem.gamma*nextV
if decayAlpha:
alpha = self.alpha/(self.counter[state,action]+1)
else:
alpha = self.alpha
self.qValues[state,action] += alpha * delta
self.counter[state,action] += 1
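    # Note (added for clarity, not part of the original source): the update above
    # is the standard tabular Q-learning rule with an optional 1/N(s,a) step size,
    #
    #     Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
    #
    # e.g. with Q(s,a)=0, r=1, gamma=0.9, max_a' Q(s',a')=2 and alpha=0.1 the
    # new value is 0 + 0.1 * (1 + 0.9*2 - 0) = 0.28.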
def preUpdate(self,state,action):
return
class SarsaLambda(OnlineAgent):
"""
SARSA with eligibility traces
"""
def __init__(self,problem,alpha,lamda=0.5,policy='e-greedy',
epsilon=1e-1,debug=False):
OnlineAgent.__init__(self,problem,epsilon=epsilon)
self.alpha = problem.setAlpha(alpha)
self.e = problem.getZeroQTable()
self.counter = problem.getZeroQTable()
self.lamda = lamda
    def reset(self):
        self.problem.reset()
        self.e = self.problem.getZeroQTable()
def preUpdate(self,state,action):
self.e *= self.problem.gamma*self.lamda
for a in self.problem.actions:
if a == action:
self.e[state,a] = 1
else:
self.e[state,a] = 0
def update(self,state,nextState,action,reward,decayAlpha,terminal=0):
'''
Sarsa(Lambda) update
'''
        nextAction = self.executePolicy(nextState)
if terminal:
nextV=0
else:
nextV = self.getQValue(nextState,nextAction)
delta = reward - self.getQValue(state,action)
delta += self.problem.gamma*nextV
if decayAlpha:
            alpha = self.alpha*((self.counter[state,action]+1)**(-1))
else:
alpha = self.alpha
self.counter[state,action] += 1
self.qValues += delta*alpha*self.e
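    # Note (added for clarity, not part of the original source): together with
    # preUpdate() this implements Sarsa(lambda) with replacing traces,
    #
    #     delta = r + gamma * Q(s',a') - Q(s,a)
    #     e <- gamma * lambda * e,  e[s, a_taken] = 1
    #     Q <- Q + alpha * delta * e
    #
    # so recently visited state-action pairs receive most of the credit.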
class VIAgent():
"""
Offline value iteration agent
"""
def __init__(self,problem, policy="e-greedy",epsilon=1e-1,timeout=int(1e6)):
'''
Must be initialised with a problem with known transition and reward matrices
'''
self.problem = problem
self.epsilon = epsilon
self.qValues = problem.getZeroQTable()
self.transitionMatrix = problem.transitions
self.rewardMatrix = problem.rewards
self.timeout = timeout
#if policy == "e-greedy":
self.policyMatrix = np.zeros(self.qValues.shape) + 1/self.qValues.shape[0]
def executePolicy(self, state, epsilon=1e-1,tiebreak='random'):
qs = self.getQArray(state)
test = random.random()
if test < epsilon:
return random.choice(range(len(qs)))
elif tiebreak == 'first':
return np.where(qs==max(qs))[0][0]
elif tiebreak == 'random':
return random.choice(np.where(qs==max(qs))[0])
def getQValue(self,state,action):
'''
Get Q(s,a). S may be either an integer of list of ints if
function approximation is used.
'''
if isinstance(state,collections.Container):
state=np.array(state)
return sum(self.qValues[state,action])
return self.qValues[state,action]
def getValue(self,state):
qValues = self.getQArray(state)
return max(qValues)
def getQArray(self,state):
return np.array([self.getQValue(state,a) for a in self.problem.actions])
def greedifyPolicy(self,epsilon=1e-1):
old_policy = self.policyMatrix
self.policyMatrix = np.full_like(self.policyMatrix,epsilon/self.qValues.shape[0])
for state, policy in enumerate(self.policyMatrix):
policy_choice = self.executePolicy(state,epsilon=0)
policy[policy_choice] += 1-epsilon
if (self.policyMatrix == old_policy).all():
return 1
else:
return 0
def VISweep(self):
while True:
self.evalPolicy()
if self.greedifyPolicy():
break
def evalPolicy(self, deltaMin=1e-5):
delta = float('inf')
counter = 0
while delta>deltaMin and counter<self.timeout:
delta = 0
for state, aValues in enumerate(self.qValues):
for action, action_value in enumerate(aValues):
temp = action_value
states = range(len(self.qValues))
new_values = [self.transitionMatrix[action,state,nstate]*
(self.rewardMatrix[action,state,nstate]+
self.problem.gamma*self.getValue(nstate))
for nstate in states ]
new_action_value = sum(new_values)
self.qValues[state,action] = new_action_value
delta = max(delta, abs(temp-new_action_value))
counter += 1
if counter >= self.timeout-1:
print("Value iteration did not converge, delta = {}".format(delta))
|
mit
| -4,746,742,530,396,633,000
| 28.493243
| 89
| 0.548454
| false
| 4.028611
| false
| false
| false
|
SNeuhausen/training_management
|
models/resource_analysis/trainer_workload_analyzer.py
|
1
|
4295
|
# -*- coding: utf-8 -*-
from openerp import api, models
from openerp.addons.training_management.models.model_names import ModelNames
from openerp.addons.training_management.utils.date_utils import DateUtils
class TrainerWorkloadAnalyzer(models.AbstractModel):
_name = ModelNames.TRAINER_WORKLOAD_ANALYZER
@api.model
def compute_trainer_workload_data(self, start_date, end_date):
start_date, end_date = DateUtils.convert_to_dates(start_date, end_date)
first_week = DateUtils.get_monday_of_week(start_date)
last_week = DateUtils.get_friday_of_week(end_date)
trainer_workload_data = {
"weeks_to_display": [],
"trainer_info": {},
"workloads": {},
"workload_totals": {},
}
current_week = first_week
while current_week <= last_week:
year_week = DateUtils.build_year_week_string_from_date(current_week)
trainer_workload_data["weeks_to_display"].append(year_week)
current_week += DateUtils.ONE_WEEK_TIME_DELTA
partner_model = self.env[ModelNames.PARTNER]
trainers = partner_model.search([("is_trainer", "=", True)])
for trainer in trainers:
trainer_id = str(trainer.id)
trainer_workload_data["workloads"][trainer_id] = {}
self._add_trainer_info(trainer_workload_data, trainer)
resources = self._find_resources_in_range_having_trainer(first_week, last_week, trainers)
self._update_trainer_workload_data_from_resources(resources, trainer_workload_data)
workloads = trainer_workload_data["workloads"]
for trainer_id, trainer_workload in workloads.iteritems():
lesson_total = sum(trainer_workload.values())
trainer_workload_data["workload_totals"][trainer_id] = lesson_total;
return trainer_workload_data
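    # Shape of the returned structure (comment added for clarity, not part of the
    # original source; ids and values are invented and the week-string format is
    # assumed to be year-week):
    #
    #     {
    #         "weeks_to_display": ["2016-01", "2016-02"],
    #         "trainer_info":     {"7": {"color_name": "blue", "name": "Doe, Jane"}},
    #         "workloads":        {"7": {"2016-01": 12, "2016-02": 8}},
    #         "workload_totals":  {"7": 20},
    #     }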
@staticmethod
def _add_trainer_info(trainer_workload_data, trainer):
trainer_info = trainer_workload_data["trainer_info"]
trainer_id = str(trainer.id)
if trainer_id not in trainer_info:
trainer_info[trainer_id] = {}
trainer_info[trainer_id].update({
"color_name": trainer.color_name,
"name": u"{surname}, {forename}".format(surname=trainer.surname, forename=trainer.forename),
})
def _update_trainer_workload_data_from_resources(self, resources, trainer_workload_data):
for resource in resources:
if not resource.trainer_id:
continue
trainer_id = str(resource.trainer_id.id)
year_week = resource.year_week_string
workloads = trainer_workload_data["workloads"]
if trainer_id not in workloads:
workloads[trainer_id] = {}
self._add_trainer_info(trainer_workload_data, resource.trainer_id)
trainer_workload = workloads[trainer_id]
if year_week not in trainer_workload:
trainer_workload[year_week] = 0
trainer_workload[year_week] += resource.get_lesson_count()
def _find_resources_in_range_having_trainer(self, start_date, end_date, trainers):
resource_model = self.env[ModelNames.RESOURCE]
domain = [
("date", ">=", DateUtils.convert_to_string(start_date)),
("date", "<=", DateUtils.convert_to_string(end_date)),
("trainer_id", "in", trainers.ids),
]
return resource_model.search(domain)
@api.model
@api.returns("self")
def find_trainers_with_main_location(self, main_location_id):
trainer_model = self.env[ModelNames.TRAINER]
domain = [
("is_trainer", "=", True),
("main_location_id", "=", main_location_id)
]
trainers = trainer_model.search(domain)
return trainers
def _find_trainers_for_user_locations(self):
location_model = self.env[ModelNames.LOCATION]
trainer_model = self.env[ModelNames.TRAINER]
user_locations = location_model.search([("user_ids", "in", [self.env.user.id])])
domain = [
("is_trainer", "=", True),
("main_location_id", "in", user_locations.ids)
]
trainers = trainer_model.search(domain)
return trainers
|
gpl-3.0
| -8,989,901,224,261,650,000
| 41.524752
| 104
| 0.618859
| false
| 3.546656
| false
| false
| false
|
petrblahos/pyramid_locmako
|
pyramid_locmako/scaffolds/__init__.py
|
1
|
1383
|
import subprocess
import sys
from pyramid.scaffolds import PyramidTemplate
class LocmakoTemplate(PyramidTemplate):
_template_dir = 'pyramid_locmako'
summary = 'pyramid project with Mako and Localization'
def post(self, command, output_dir, vars):
print "=== POST", command, output_dir, vars
subprocess.call([ sys.executable, "setup.py", "extract_messages" ], cwd=output_dir)
while 1:
lc = raw_input("Language to initialize: (enter to skip)")
if not lc:
break
if 2!=len(lc) or not lc.isalpha():
print "sorry, need 2 letters, nothing more"
continue
subprocess.call([ sys.executable, "setup.py", "init_catalog", "-l", lc ], cwd=output_dir)
subprocess.call([ sys.executable, "setup.py", "update_catalog" ], cwd=output_dir)
subprocess.call([ sys.executable, "setup.py", "compile_catalog" ], cwd=output_dir)
return super(self.__class__, self).post(command, output_dir, vars)
def pre(self, command, output_dir, vars):
return super(self.__class__, self).pre(command, output_dir, vars)
def template_dir(self):
return super(self.__class__, self).template_dir()
def render_template(self, content, vars, filename=None):
return super(self.__class__, self).render_template(content, vars, filename)
|
mit
| -4,874,192,753,432,911,000
| 40.909091
| 101
| 0.62979
| false
| 3.841667
| false
| false
| false
|
turbokongen/home-assistant
|
homeassistant/components/bond/config_flow.py
|
1
|
4278
|
"""Config flow for Bond integration."""
import logging
from typing import Any, Dict, Optional
from aiohttp import ClientConnectionError, ClientResponseError
from bond_api import Bond
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
CONF_NAME,
HTTP_UNAUTHORIZED,
)
from .const import CONF_BOND_ID
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA_USER = vol.Schema(
{vol.Required(CONF_HOST): str, vol.Required(CONF_ACCESS_TOKEN): str}
)
DATA_SCHEMA_DISCOVERY = vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str})
async def _validate_input(data: Dict[str, Any]) -> str:
"""Validate the user input allows us to connect."""
try:
bond = Bond(data[CONF_HOST], data[CONF_ACCESS_TOKEN])
version = await bond.version()
# call to non-version API is needed to validate authentication
await bond.devices()
except ClientConnectionError as error:
raise InputValidationError("cannot_connect") from error
except ClientResponseError as error:
if error.status == HTTP_UNAUTHORIZED:
raise InputValidationError("invalid_auth") from error
raise InputValidationError("unknown") from error
except Exception as error:
_LOGGER.exception("Unexpected exception")
raise InputValidationError("unknown") from error
# Return unique ID from the hub to be stored in the config entry.
bond_id = version.get("bondid")
if not bond_id:
raise InputValidationError("old_firmware")
return bond_id
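# Illustrative note, not part of the original source: for user input such as
# {"host": "192.168.1.10", "access_token": "..."} (values invented), the helper
# above opens a Bond client, requires a successful devices() call to prove the
# token works, and returns the hub's "bondid" from the version payload, which
# the config flow below then stores as the entry's unique_id.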
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Bond."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
_discovered: dict = None
async def async_step_zeroconf(
self, discovery_info: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by zeroconf discovery."""
name: str = discovery_info[CONF_NAME]
host: str = discovery_info[CONF_HOST]
bond_id = name.partition(".")[0]
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured({CONF_HOST: host})
self._discovered = {
CONF_HOST: host,
CONF_BOND_ID: bond_id,
}
self.context.update({"title_placeholders": self._discovered})
return await self.async_step_confirm()
async def async_step_confirm(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle confirmation flow for discovered bond hub."""
errors = {}
if user_input is not None:
data = user_input.copy()
data[CONF_HOST] = self._discovered[CONF_HOST]
try:
return await self._try_create_entry(data)
except InputValidationError as error:
errors["base"] = error.base
return self.async_show_form(
step_id="confirm",
data_schema=DATA_SCHEMA_DISCOVERY,
errors=errors,
description_placeholders=self._discovered,
)
async def async_step_user(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
return await self._try_create_entry(user_input)
except InputValidationError as error:
errors["base"] = error.base
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA_USER, errors=errors
)
async def _try_create_entry(self, data: Dict[str, Any]) -> Dict[str, Any]:
bond_id = await _validate_input(data)
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=bond_id, data=data)
class InputValidationError(exceptions.HomeAssistantError):
"""Error to indicate we cannot proceed due to invalid input."""
def __init__(self, base: str):
"""Initialize with error base."""
super().__init__()
self.base = base
|
apache-2.0
| 4,549,398,951,194,947,600
| 32.421875
| 78
| 0.63511
| false
| 4.047304
| true
| false
| false
|
ubuntu-core/snapcraft
|
snapcraft/internal/sources/_base.py
|
1
|
5773
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import requests
import shutil
import subprocess
import sys
import snapcraft.internal.common
from snapcraft.internal.cache import FileCache
from snapcraft.internal.indicators import (
download_requests_stream,
download_urllib_source,
)
from ._checksum import split_checksum, verify_checksum
from . import errors
class Base:
def __init__(
self,
source,
source_dir,
source_tag=None,
source_commit=None,
source_branch=None,
source_depth=None,
source_checksum=None,
command=None,
):
self.source = source
self.source_dir = source_dir
self.source_tag = source_tag
self.source_commit = source_commit
self.source_branch = source_branch
self.source_depth = source_depth
self.source_checksum = source_checksum
self.source_details = None
self.command = command
self._checked = False
def check(self, target: str):
"""Check if pulled sources have changed since target was created.
:param str target: Path to target file.
"""
self._checked = True
return self._check(target)
def update(self):
"""Update pulled source.
:raises RuntimeError: If this function is called before `check()`.
"""
if not self._checked:
# This is programmer error
raise RuntimeError("source must be checked before it's updated")
self._update()
def _check(self, target: str):
"""Check if pulled sources have changed since target was created.
:param str target: Path to target file.
"""
raise errors.SourceUpdateUnsupportedError(self)
def _update(self):
"""Update pulled source."""
raise errors.SourceUpdateUnsupportedError(self)
def _run(self, command, **kwargs):
try:
subprocess.check_call(command, **kwargs)
except subprocess.CalledProcessError as e:
raise errors.SnapcraftPullError(command, e.returncode)
def _run_output(self, command, **kwargs):
try:
return (
subprocess.check_output(command, **kwargs)
.decode(sys.getfilesystemencoding())
.strip()
)
except subprocess.CalledProcessError as e:
raise errors.SnapcraftPullError(command, e.returncode)
class FileBase(Base):
def pull(self):
source_file = None
is_source_url = snapcraft.internal.common.isurl(self.source)
# First check if it is a url and download and if not
# it is probably locally referenced.
if is_source_url:
source_file = self.download()
else:
basename = os.path.basename(self.source)
source_file = os.path.join(self.source_dir, basename)
# We make this copy as the provisioning logic can delete
# this file and we don't want that.
try:
shutil.copy2(self.source, source_file)
except FileNotFoundError as exc:
raise errors.SnapcraftSourceNotFoundError(self.source) from exc
# Verify before provisioning
if self.source_checksum:
verify_checksum(self.source_checksum, source_file)
# We finally provision, but we don't clean the target so override-pull
# can actually have meaning when using these sources.
self.provision(self.source_dir, src=source_file, clean_target=False)
def download(self, filepath: str = None) -> str:
if filepath is None:
self.file = os.path.join(self.source_dir, os.path.basename(self.source))
else:
self.file = filepath
# First check if we already have the source file cached.
file_cache = FileCache()
if self.source_checksum:
algorithm, hash = split_checksum(self.source_checksum)
cache_file = file_cache.get(algorithm=algorithm, hash=hash)
if cache_file:
# We make this copy as the provisioning logic can delete
# this file and we don't want that.
shutil.copy2(cache_file, self.file)
return self.file
# If not we download and store
if snapcraft.internal.common.get_url_scheme(self.source) == "ftp":
download_urllib_source(self.source, self.file)
else:
try:
request = requests.get(self.source, stream=True, allow_redirects=True)
request.raise_for_status()
except requests.exceptions.RequestException as e:
raise errors.SnapcraftRequestError(message=e)
download_requests_stream(request, self.file)
# We verify the file if source_checksum is defined
# and we cache the file for future reuse.
if self.source_checksum:
algorithm, digest = verify_checksum(self.source_checksum, self.file)
            file_cache.cache(filename=self.file, algorithm=algorithm, hash=digest)
return self.file
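    # Illustrative flow, not part of the original source: a FileBase subclass is
    # typically given something like
    #
    #     source          = 'https://example.com/foo.tar.gz'   (invented URL)
    #     source_dir      = 'parts/foo/src'
    #     source_checksum = 'sha256/9f86d08...'                 (assumed "<algorithm>/<digest>" form)
    #
    # pull() then either copies a local file or calls download(), which first
    # checks the FileCache for the digest, otherwise downloads over HTTP/FTP,
    # verifies the checksum, and caches the file for reuse by later builds.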
|
gpl-3.0
| -2,380,685,263,678,600,000
| 34.417178
| 86
| 0.63208
| false
| 4.373485
| false
| false
| false
|
looopTools/sw9-source
|
.waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/Tools/waf_unit_test.py
|
1
|
5372
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,sys
from waflib.TaskGen import feature,after_method,taskgen_method
from waflib import Utils,Task,Logs,Options
from waflib.Tools import ccroot
testlock=Utils.threading.Lock()
SCRIPT_TEMPLATE="""#! %(python)s
import subprocess, sys
cmd = %(cmd)r
# if you want to debug with gdb:
#cmd = ['gdb', '-args'] + cmd
env = %(env)r
status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""
@feature('test')
@after_method('apply_link','process_use')
def make_test(self):
if not getattr(self,'link_task',None):
return
tsk=self.create_task('utest',self.link_task.outputs)
if getattr(self,'ut_str',None):
self.ut_run,lst=Task.compile_fun(self.ut_str,shell=getattr(self,'ut_shell',False))
tsk.vars=lst+tsk.vars
if getattr(self,'ut_cwd',None):
if isinstance(self.ut_cwd,str):
if os.path.isabs(self.ut_cwd):
self.ut_cwd=self.bld.root.make_node(self.ut_cwd)
else:
self.ut_cwd=self.path.make_node(self.ut_cwd)
else:
self.ut_cwd=tsk.inputs[0].parent
if not hasattr(self,'ut_paths'):
paths=[]
for x in self.tmp_use_sorted:
try:
y=self.bld.get_tgen_by_name(x).link_task
except AttributeError:
pass
else:
if not isinstance(y,ccroot.stlink_task):
paths.append(y.outputs[0].parent.abspath())
self.ut_paths=os.pathsep.join(paths)+os.pathsep
if not hasattr(self,'ut_env'):
self.ut_env=dct=dict(os.environ)
def add_path(var):
dct[var]=self.ut_paths+dct.get(var,'')
if Utils.is_win32:
add_path('PATH')
elif Utils.unversioned_sys_platform()=='darwin':
add_path('DYLD_LIBRARY_PATH')
add_path('LD_LIBRARY_PATH')
else:
add_path('LD_LIBRARY_PATH')
@taskgen_method
def add_test_results(self,tup):
Logs.debug("ut: %r",tup)
self.utest_result=tup
try:
self.bld.utest_results.append(tup)
except AttributeError:
self.bld.utest_results=[tup]
class utest(Task.Task):
color='PINK'
after=['vnum','inst']
vars=[]
def runnable_status(self):
if getattr(Options.options,'no_tests',False):
return Task.SKIP_ME
ret=super(utest,self).runnable_status()
if ret==Task.SKIP_ME:
if getattr(Options.options,'all_tests',False):
return Task.RUN_ME
return ret
def get_test_env(self):
return self.generator.ut_env
def post_run(self):
super(utest,self).post_run()
if getattr(Options.options,'clear_failed_tests',False)and self.waf_unit_test_results[1]:
self.generator.bld.task_sigs[self.uid()]=None
def run(self):
if hasattr(self.generator,'ut_run'):
return self.generator.ut_run(self)
self.ut_exec=getattr(self.generator,'ut_exec',[self.inputs[0].abspath()])
if getattr(self.generator,'ut_fun',None):
self.generator.ut_fun(self)
testcmd=getattr(self.generator,'ut_cmd',False)or getattr(Options.options,'testcmd',False)
if testcmd:
self.ut_exec=(testcmd%' '.join(self.ut_exec)).split(' ')
return self.exec_command(self.ut_exec)
def exec_command(self,cmd,**kw):
Logs.debug('runner: %r',cmd)
if getattr(Options.options,'dump_test_scripts',False):
global SCRIPT_TEMPLATE
script_code=SCRIPT_TEMPLATE%{'python':sys.executable,'env':self.get_test_env(),'cwd':self.get_cwd().abspath(),'cmd':cmd}
script_file=self.inputs[0].abspath()+'_run.py'
Utils.writef(script_file,script_code)
os.chmod(script_file,Utils.O755)
if Logs.verbose>1:
Logs.info('Test debug file written as %r'%script_file)
proc=Utils.subprocess.Popen(cmd,cwd=self.get_cwd().abspath(),env=self.get_test_env(),stderr=Utils.subprocess.PIPE,stdout=Utils.subprocess.PIPE)
(stdout,stderr)=proc.communicate()
self.waf_unit_test_results=tup=(self.inputs[0].abspath(),proc.returncode,stdout,stderr)
testlock.acquire()
try:
return self.generator.add_test_results(tup)
finally:
testlock.release()
def get_cwd(self):
return self.generator.ut_cwd
def summary(bld):
lst=getattr(bld,'utest_results',[])
if lst:
Logs.pprint('CYAN','execution summary')
total=len(lst)
tfail=len([x for x in lst if x[1]])
Logs.pprint('CYAN',' tests that pass %d/%d'%(total-tfail,total))
for(f,code,out,err)in lst:
if not code:
Logs.pprint('CYAN',' %s'%f)
Logs.pprint('CYAN',' tests that fail %d/%d'%(tfail,total))
for(f,code,out,err)in lst:
if code:
Logs.pprint('CYAN',' %s'%f)
def set_exit_code(bld):
lst=getattr(bld,'utest_results',[])
for(f,code,out,err)in lst:
if code:
msg=[]
if out:
msg.append('stdout:%s%s'%(os.linesep,out.decode('utf-8')))
if err:
msg.append('stderr:%s%s'%(os.linesep,err.decode('utf-8')))
bld.fatal(os.linesep.join(msg))
def options(opt):
opt.add_option('--notests',action='store_true',default=False,help='Exec no unit tests',dest='no_tests')
opt.add_option('--alltests',action='store_true',default=False,help='Exec all unit tests',dest='all_tests')
opt.add_option('--clear-failed',action='store_true',default=False,help='Force failed unit tests to run again next time',dest='clear_failed_tests')
opt.add_option('--testcmd',action='store',default=False,help='Run the unit tests using the test-cmd string'' example "--test-cmd="valgrind --error-exitcode=1'' %s" to run under valgrind',dest='testcmd')
opt.add_option('--dump-test-scripts',action='store_true',default=False,help='Create python scripts to help debug tests',dest='dump_test_scripts')
|
mit
| -4,228,622,893,590,671,400
| 36.566434
| 203
| 0.698436
| false
| 2.761954
| true
| false
| false
|
Strubbl/matekate
|
refresh.py
|
1
|
3413
|
#!/usr/bin/python
import cgi
import logging
import urllib2
import simplejson
import os
import sys
try:
loglevel = sys.argv[1]
except IndexError:
loglevel = None
if loglevel == '-d':
logging.basicConfig(level=logging.DEBUG)
logging.debug('set logging lvl to debug')
scriptdir = os.path.dirname(os.path.abspath(__file__))
f = urllib2.urlopen('http://overpass-api.de/api/interpreter?data=[out:json];(node["drink:club-mate"~"."];>;way["drink:club-mate"~"."];>;);out;')
try:
json = simplejson.load(f)
except simplejson.JSONDecodeError, e:
print(e)
sys.exit(1)
f.close()
nodes = {}
counter = 0
with open(scriptdir + '/js/club-mate-data.js', 'w') as f:
logging.debug('enter file loop')
f.write('function mate_locations_populate(markers) {\n')
for e in json['elements']:
ide = e['id']
lat = e.get('lat', None)
lon = e.get('lon', None)
typ = e['type']
tags = e.get('tags', {})
logging.debug('Element id=%s type=%s tags=%s', ide, typ, tags)
for k in tags.keys():
tags[k] = cgi.escape(tags[k]).replace('"', '\\"')
if typ == 'node':
nodes[ide] = (lat,lon)
if typ == 'way':
lat, lon = nodes[e['nodes'][0]] # extract coordinate of first node
logging.debug('Element id=%s lat=%s lon=%s', ide, lat, lon)
if not lat or not lon:
logging.warn('Element id=%s has missing lat=%s or lon=%s', ide, lat, lon)
if 'name' in tags:
name = tags['name']
else:
name = '%s %s' % (typ, ide)
if tags.get('drink:club-mate') == None:
logging.debug('This node has no tag drink:club-mate at all')
continue
elif tags.get('drink:club-mate') == 'retail':
icon = "icon_retail"
elif tags.get('drink:club-mate') == 'served':
icon = "icon_served"
else:
icon = "icon_normal"
popup = '<b>%s</b> <a href=\\"http://openstreetmap.org/browse/%s/%s\\" target=\\"_blank\\">*</a><hr/>' % (name, typ, ide)
if 'addr:street' in tags:
popup += '%s %s<br/>' % (tags.get('addr:street', ''), tags.get('addr:housenumber', ''))
if 'addr:city' in tags:
popup += '%s %s<br/>' % (tags.get('addr:postcode', ''), tags.get('addr:city', ''))
if 'addr:country' in tags:
popup += '%s<br/>' % (tags.get('addr:country', ''))
popup += '<hr/>'
if 'opening_hours' in tags:
popup += 'opening hours: %s<br/>' % (tags['opening_hours'])
if 'contact:website' in tags:
popup += 'website: <a href=\\"%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['contact:website'], tags['contact:website'])
elif 'website' in tags:
popup += 'website: <a href=\\"%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['website'], tags['website'])
if 'contact:email' in tags:
popup += 'email: <a href=\\"mailto:%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['contact:email'], tags['contact:email'])
elif 'email' in tags:
popup += 'email: <a href=\\"mailto:%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['email'], tags['email'])
if 'contact:phone' in tags:
popup += 'phone: %s<br/>' % (tags['contact:phone'])
elif 'phone' in tags:
popup += 'phone: %s<br/>' % (tags['phone'])
f.write(' markers.addLayer(L.marker([%s, %s], {"title": "%s", "icon": %s}).bindPopup("%s"));\n' % (lat, lon, name.encode('utf-8'), icon, popup.encode('utf-8')))
counter += 1
f.write('}\n')
logging.info('added %i elements to data file', counter)
sys.exit(0)
|
gpl-3.0
| 2,305,478,928,946,069,800
| 33.13
| 165
| 0.572224
| false
| 2.947323
| false
| false
| false
|
IBM-Security/ibmsecurity
|
ibmsecurity/isam/base/cluster/trace.py
|
1
|
1483
|
import logging
logger = logging.getLogger(__name__)
requires_model="Appliance"
try:
basestring
except NameError:
basestring = (str, bytes)
def get(isamAppliance, check_mode=False, force=False):
"""
Retrieve the tracing levels
"""
return isamAppliance.invoke_get("Retrieve the tracing levels",
"/isam/cluster/tracing/v1", requires_model=requires_model)
def _check(isamAppliance, dsc):
check_value,warnings = True,""
ret_obj = get(isamAppliance)
warnings = ret_obj['warnings']
if isinstance(dsc, basestring):
import ast
dsc = ast.literal_eval(dsc)
if 'dsc' in ret_obj['data']:
check_value = (ret_obj['data']['dsc']==dsc)
return check_value,warnings
else:
check_value=True
return check_value,warnings
def set(isamAppliance, dsc, check_mode=False, force=False):
"""
Updating the tracing levels
"""
check_value,warnings = _check(isamAppliance, dsc)
if force is True or check_value is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put(
"Updating the tracing levels",
"/isam/cluster/tracing/v1",
{
'dsc': dsc
}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)
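# Illustrative usage sketch, not part of the original module; the dsc keys and
# values are invented:
#
#     set(isamAppliance, dsc={'trace_level': 1}, check_mode=True)
#
# _check() compares the desired 'dsc' dict with the current tracing levels, so
# the PUT is only issued (or, in check mode, only reported as a change) when the
# values actually differ, which keeps the call idempotent.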
|
apache-2.0
| -4,713,544,688,860,106,000
| 25.963636
| 94
| 0.612272
| false
| 3.802564
| false
| false
| false
|
gov-cjwaszczuk/notifications-admin
|
app/notify_client/notification_api_client.py
|
1
|
2954
|
from app.notify_client import _attach_current_user, NotifyAdminAPIClient
class NotificationApiClient(NotifyAdminAPIClient):
def __init__(self):
super().__init__("a" * 73, "b")
def init_app(self, app):
self.base_url = app.config['API_HOST_NAME']
self.service_id = app.config['ADMIN_CLIENT_USER_NAME']
self.api_key = app.config['ADMIN_CLIENT_SECRET']
def get_notifications_for_service(
self,
service_id,
job_id=None,
template_type=None,
status=None,
page=None,
page_size=None,
limit_days=None,
include_jobs=None,
include_from_test_key=None,
format_for_csv=None,
to=None,
):
params = {}
if page is not None:
params['page'] = page
if page_size is not None:
params['page_size'] = page_size
if template_type is not None:
params['template_type'] = template_type
if status is not None:
params['status'] = status
if include_jobs is not None:
params['include_jobs'] = include_jobs
if include_from_test_key is not None:
params['include_from_test_key'] = include_from_test_key
if format_for_csv is not None:
params['format_for_csv'] = format_for_csv
if to is not None:
params['to'] = to
if job_id:
return self.get(
url='/service/{}/job/{}/notifications'.format(service_id, job_id),
params=params
)
else:
if limit_days is not None:
params['limit_days'] = limit_days
return self.get(
url='/service/{}/notifications'.format(service_id),
params=params
)
def send_notification(self, service_id, *, template_id, recipient, personalisation, sender_id):
data = {
'template_id': template_id,
'to': recipient,
'personalisation': personalisation,
}
if sender_id:
data['sender_id'] = sender_id
data = _attach_current_user(data)
return self.post(url='/service/{}/send-notification'.format(service_id), data=data)
def get_notification(self, service_id, notification_id):
return self.get(url='/service/{}/notifications/{}'.format(service_id, notification_id))
def get_api_notifications_for_service(self, service_id):
ret = self.get_notifications_for_service(service_id, include_jobs=False, include_from_test_key=True)
return self.map_letters_to_accepted(ret)
@staticmethod
def map_letters_to_accepted(notifications):
for notification in notifications['notifications']:
if notification['notification_type'] == 'letter' and notification['status'] in ('created', 'sending'):
notification['status'] = 'accepted'
return notifications
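# Illustrative example of the mapping above (payload shape assumed from the
# notifications API responses handled by this client):
#
#   NotificationApiClient.map_letters_to_accepted({'notifications': [
#       {'notification_type': 'letter', 'status': 'sending'},
#       {'notification_type': 'email', 'status': 'sending'},
#   ]})
#
# returns the same structure with the letter's status rewritten to 'accepted'
# while the email entry is left untouched.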
|
mit
| 5,493,466,503,666,718,000
| 35.925
| 114
| 0.578876
| false
| 4.068871
| false
| false
| false
|
NaohiroTamura/python-ironicclient
|
ironicclient/common/http.py
|
1
|
25914
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from distutils.version import StrictVersion
import functools
import hashlib
import logging
import os
import socket
import ssl
import textwrap
import time
from keystoneauth1 import adapter
from keystoneauth1 import exceptions as kexc
from oslo_serialization import jsonutils
from oslo_utils import strutils
import requests
import six
from six.moves import http_client
import six.moves.urllib.parse as urlparse
from ironicclient.common import filecache
from ironicclient.common.i18n import _
from ironicclient.common.i18n import _LE
from ironicclient.common.i18n import _LW
from ironicclient import exc
# NOTE(deva): Record the latest version that this client was tested with.
# We still have a lot of work to do in the client to implement
# microversion support in the client properly! See
# http://specs.openstack.org/openstack/ironic-specs/specs/kilo/api-microversions.html # noqa
# for full details.
DEFAULT_VER = '1.9'
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-ironicclient'
CHUNKSIZE = 1024 * 64 # 64kB
API_VERSION = '/v1'
API_VERSION_SELECTED_STATES = ('user', 'negotiated', 'cached', 'default')
DEFAULT_MAX_RETRIES = 5
DEFAULT_RETRY_INTERVAL = 2
SENSITIVE_HEADERS = ('X-Auth-Token',)
SUPPORTED_ENDPOINT_SCHEME = ('http', 'https')
def _trim_endpoint_api_version(url):
"""Trim API version and trailing slash from endpoint."""
return url.rstrip('/').rstrip(API_VERSION)
def _extract_error_json(body):
"""Return error_message from the HTTP response body."""
error_json = {}
try:
body_json = jsonutils.loads(body)
if 'error_message' in body_json:
raw_msg = body_json['error_message']
error_json = jsonutils.loads(raw_msg)
except ValueError:
pass
return error_json
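# Example of the double-encoded error body this helper handles -- the
# 'error_message' value is itself a JSON document (data illustrative):
#
#   body = '{"error_message": "{\\"faultstring\\": \\"Node not found\\"}"}'
#   _extract_error_json(body)  ->  {'faultstring': 'Node not found'}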
def get_server(endpoint):
"""Extract and return the server & port that we're connecting to."""
if endpoint is None:
return None, None
parts = urlparse.urlparse(endpoint)
return parts.hostname, str(parts.port)
class VersionNegotiationMixin(object):
def negotiate_version(self, conn, resp):
"""Negotiate the server version
Assumption: Called after receiving a 406 error when doing a request.
param conn: A connection object
param resp: The response object from http request
"""
if self.api_version_select_state not in API_VERSION_SELECTED_STATES:
raise RuntimeError(
_('Error: self.api_version_select_state should be one of the '
'values in: "%(valid)s" but had the value: "%(value)s"') %
{'valid': ', '.join(API_VERSION_SELECTED_STATES),
'value': self.api_version_select_state})
min_ver, max_ver = self._parse_version_headers(resp)
# NOTE: servers before commit 32fb6e99 did not return version headers
# on error, so we need to perform a GET to determine
# the supported version range
if not max_ver:
LOG.debug('No version header in response, requesting from server')
if self.os_ironic_api_version:
base_version = ("/v%s" %
str(self.os_ironic_api_version).split('.')[0])
else:
base_version = API_VERSION
resp = self._make_simple_request(conn, 'GET', base_version)
min_ver, max_ver = self._parse_version_headers(resp)
# If the user requested an explicit version or we have negotiated a
# version and still failing then error now. The server could
# support the version requested but the requested operation may not
# be supported by the requested version.
if self.api_version_select_state == 'user':
raise exc.UnsupportedVersion(textwrap.fill(
_("Requested API version %(req)s is not supported by the "
"server or the requested operation is not supported by the "
"requested version. Supported version range is %(min)s to "
"%(max)s")
% {'req': self.os_ironic_api_version,
'min': min_ver, 'max': max_ver}))
if self.api_version_select_state == 'negotiated':
raise exc.UnsupportedVersion(textwrap.fill(
_("No API version was specified and the requested operation "
"was not supported by the client's negotiated API version "
"%(req)s. Supported version range is: %(min)s to %(max)s")
% {'req': self.os_ironic_api_version,
'min': min_ver, 'max': max_ver}))
negotiated_ver = str(min(StrictVersion(self.os_ironic_api_version),
StrictVersion(max_ver)))
if negotiated_ver < min_ver:
negotiated_ver = min_ver
# server handles microversions, but doesn't support
# the requested version, so try a negotiated version
self.api_version_select_state = 'negotiated'
self.os_ironic_api_version = negotiated_ver
LOG.debug('Negotiated API version is %s', negotiated_ver)
# Cache the negotiated version for this server
host, port = get_server(self.endpoint)
filecache.save_data(host=host, port=port, data=negotiated_ver)
return negotiated_ver
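    # Worked example of the negotiation above (illustrative values): if the
    # client requested 1.31 and the server reports 1.1 .. 1.22, the negotiated
    # version is min(1.31, 1.22) == 1.22; a request below the server minimum is
    # raised to min_ver instead. The result is cached via filecache for later
    # clients talking to the same host:port.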
def _generic_parse_version_headers(self, accessor_func):
min_ver = accessor_func('X-OpenStack-Ironic-API-Minimum-Version',
None)
max_ver = accessor_func('X-OpenStack-Ironic-API-Maximum-Version',
None)
return min_ver, max_ver
def _parse_version_headers(self, accessor_func):
# NOTE(jlvillal): Declared for unit testing purposes
raise NotImplementedError()
def _make_simple_request(self, conn, method, url):
# NOTE(jlvillal): Declared for unit testing purposes
raise NotImplementedError()
_RETRY_EXCEPTIONS = (exc.Conflict, exc.ServiceUnavailable,
exc.ConnectionRefused, kexc.RetriableConnectionFailure)
def with_retries(func):
"""Wrapper for _http_request adding support for retries."""
@functools.wraps(func)
def wrapper(self, url, method, **kwargs):
if self.conflict_max_retries is None:
self.conflict_max_retries = DEFAULT_MAX_RETRIES
if self.conflict_retry_interval is None:
self.conflict_retry_interval = DEFAULT_RETRY_INTERVAL
num_attempts = self.conflict_max_retries + 1
for attempt in range(1, num_attempts + 1):
try:
return func(self, url, method, **kwargs)
except _RETRY_EXCEPTIONS as error:
msg = (_LE("Error contacting Ironic server: %(error)s. "
"Attempt %(attempt)d of %(total)d") %
{'attempt': attempt,
'total': num_attempts,
'error': error})
if attempt == num_attempts:
LOG.error(msg)
raise
else:
LOG.debug(msg)
time.sleep(self.conflict_retry_interval)
return wrapper
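# Retry behaviour sketch: with the defaults above (max_retries=5,
# retry_interval=2) a call that keeps raising exc.Conflict is attempted six
# times in total, sleeping two seconds between attempts, before the final
# exception propagates to the caller.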
class HTTPClient(VersionNegotiationMixin):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.endpoint_trimmed = _trim_endpoint_api_version(endpoint)
self.auth_token = kwargs.get('token')
self.auth_ref = kwargs.get('auth_ref')
self.os_ironic_api_version = kwargs.get('os_ironic_api_version',
DEFAULT_VER)
self.api_version_select_state = kwargs.get(
'api_version_select_state', 'default')
self.conflict_max_retries = kwargs.pop('max_retries',
DEFAULT_MAX_RETRIES)
self.conflict_retry_interval = kwargs.pop('retry_interval',
DEFAULT_RETRY_INTERVAL)
self.session = requests.Session()
parts = urlparse.urlparse(endpoint)
if parts.scheme not in SUPPORTED_ENDPOINT_SCHEME:
msg = _('Unsupported scheme: %s') % parts.scheme
raise exc.EndpointException(msg)
if parts.scheme == 'https':
if kwargs.get('insecure') is True:
self.session.verify = False
elif kwargs.get('ca_file'):
self.session.verify = kwargs['ca_file']
self.session.cert = (kwargs.get('cert_file'),
kwargs.get('key_file'))
def _process_header(self, name, value):
"""Redacts any sensitive header
Redact a header that contains sensitive information, by returning an
updated header with the sha1 hash of that value. The redacted value is
prefixed by '{SHA1}' because that's the convention used within
OpenStack.
:returns: A tuple of (name, value)
name: the safe encoding format of name
value: the redacted value if name is x-auth-token,
or the safe encoding format of name
"""
if name in SENSITIVE_HEADERS:
v = value.encode('utf-8')
h = hashlib.sha1(v)
d = h.hexdigest()
return (name, "{SHA1}%s" % d)
else:
return (name, value)
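    # Example of the redaction above: _process_header('X-Auth-Token', token)
    # returns ('X-Auth-Token', '{SHA1}<hex digest of token>'), while any other
    # header is passed through unchanged.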
def log_curl_request(self, method, url, kwargs):
curl = ['curl -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % self._process_header(key, value)
curl.append(header)
if not self.session.verify:
curl.append('-k')
elif isinstance(self.session.verify, six.string_types):
curl.append('--cacert %s' % self.session.verify)
if self.session.cert:
curl.append('--cert %s' % self.session.cert[0])
curl.append('--key %s' % self.session.cert[1])
if 'body' in kwargs:
body = strutils.mask_password(kwargs['body'])
curl.append('-d \'%s\'' % body)
curl.append(urlparse.urljoin(self.endpoint_trimmed, url))
LOG.debug(' '.join(curl))
@staticmethod
def log_http_response(resp, body=None):
# NOTE(aarefiev): resp.raw is urllib3 response object, it's used
# only to get 'version', response from request with 'stream = True'
# should be used for raw reading.
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.headers.items()])
dump.append('')
if body:
body = strutils.mask_password(body)
dump.extend([body, ''])
LOG.debug('\n'.join(dump))
def _make_connection_url(self, url):
return urlparse.urljoin(self.endpoint_trimmed, url)
def _parse_version_headers(self, resp):
return self._generic_parse_version_headers(resp.headers.get)
def _make_simple_request(self, conn, method, url):
return conn.request(method, self._make_connection_url(url))
@with_retries
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around request.Session.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
if self.os_ironic_api_version:
kwargs['headers'].setdefault('X-OpenStack-Ironic-API-Version',
self.os_ironic_api_version)
if self.auth_token:
kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
self.log_curl_request(method, url, kwargs)
# NOTE(aarefiev): This is for backwards compatibility, request
# expected body in 'data' field, previously we used httplib,
# which expected 'body' field.
body = kwargs.pop('body', None)
if body:
kwargs['data'] = body
conn_url = self._make_connection_url(url)
try:
resp = self.session.request(method,
conn_url,
**kwargs)
# TODO(deva): implement graceful client downgrade when connecting
# to servers that did not support microversions. Details here:
# http://specs.openstack.org/openstack/ironic-specs/specs/kilo/api-microversions.html#use-case-3b-new-client-communicating-with-a-old-ironic-user-specified # noqa
if resp.status_code == http_client.NOT_ACCEPTABLE:
negotiated_ver = self.negotiate_version(self.session, resp)
kwargs['headers']['X-OpenStack-Ironic-API-Version'] = (
negotiated_ver)
return self._http_request(url, method, **kwargs)
except requests.exceptions.RequestException as e:
message = (_("Error has occurred while handling "
"request for %(url)s: %(e)s") %
dict(url=conn_url, e=e))
# NOTE(aarefiev): not valid request(invalid url, missing schema,
# and so on), retrying is not needed.
if isinstance(e, ValueError):
raise exc.ValidationError(message)
raise exc.ConnectionRefused(message)
body_str = None
if resp.headers.get('Content-Type') == 'application/octet-stream':
body_iter = resp.iter_content(chunk_size=CHUNKSIZE)
self.log_http_response(resp)
else:
# Read body into string if it isn't obviously image data
body_str = resp.text
self.log_http_response(resp, body_str)
body_iter = six.StringIO(body_str)
if resp.status_code >= http_client.BAD_REQUEST:
error_json = _extract_error_json(body_str)
raise exc.from_response(
resp, error_json.get('faultstring'),
error_json.get('debuginfo'), method, url)
elif resp.status_code in (http_client.MOVED_PERMANENTLY,
http_client.FOUND,
http_client.USE_PROXY):
# Redirected. Reissue the request to the new location.
            return self._http_request(resp.headers.get('location'), method, **kwargs)
elif resp.status_code == http_client.MULTIPLE_CHOICES:
raise exc.from_response(resp, method=method, url=url)
return resp, body_iter
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'body' in kwargs:
kwargs['body'] = jsonutils.dump_as_bytes(kwargs['body'])
resp, body_iter = self._http_request(url, method, **kwargs)
content_type = resp.headers.get('Content-Type')
if (resp.status_code in (http_client.NO_CONTENT,
http_client.RESET_CONTENT)
or content_type is None):
return resp, list()
if 'application/json' in content_type:
body = ''.join([chunk for chunk in body_iter])
try:
body = jsonutils.loads(body)
except ValueError:
LOG.error(_LE('Could not decode response body as JSON'))
else:
body = None
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
return self._http_request(url, method, **kwargs)
class VerifiedHTTPSConnection(six.moves.http_client.HTTPSConnection):
"""httplib-compatible connection using client-side SSL authentication
:see http://code.activestate.com/recipes/
577548-https-httplib-client-connection-with-certificate-v/
"""
def __init__(self, host, port, key_file=None, cert_file=None,
ca_file=None, timeout=None, insecure=False):
six.moves.http_client.HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
if ca_file is not None:
self.ca_file = ca_file
else:
self.ca_file = self.get_system_ca_file()
self.timeout = timeout
self.insecure = insecure
def connect(self):
"""Connect to a host on a given (SSL) port.
If ca_file is pointing somewhere, use it to check Server Certificate.
Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
ssl.wrap_socket(), which forces SSL to check server certificate against
our client certificate.
"""
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
if self.insecure is True:
kwargs = {'cert_reqs': ssl.CERT_NONE}
else:
kwargs = {'cert_reqs': ssl.CERT_REQUIRED, 'ca_certs': self.ca_file}
if self.cert_file:
kwargs['certfile'] = self.cert_file
if self.key_file:
kwargs['keyfile'] = self.key_file
self.sock = ssl.wrap_socket(sock, **kwargs)
@staticmethod
def get_system_ca_file():
"""Return path to system default CA file."""
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD
ca_path = ['/etc/ssl/certs/ca-certificates.crt',
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/ssl/ca-bundle.pem',
'/etc/ssl/cert.pem']
for ca in ca_path:
if os.path.exists(ca):
return ca
return None
class SessionClient(VersionNegotiationMixin, adapter.LegacyJsonAdapter):
"""HTTP client based on Keystone client session."""
def __init__(self,
os_ironic_api_version,
api_version_select_state,
max_retries,
retry_interval,
endpoint,
**kwargs):
self.os_ironic_api_version = os_ironic_api_version
self.api_version_select_state = api_version_select_state
self.conflict_max_retries = max_retries
self.conflict_retry_interval = retry_interval
self.endpoint = endpoint
super(SessionClient, self).__init__(**kwargs)
def _parse_version_headers(self, resp):
return self._generic_parse_version_headers(resp.headers.get)
def _make_simple_request(self, conn, method, url):
# NOTE: conn is self.session for this class
return conn.request(url, method, raise_exc=False)
@with_retries
def _http_request(self, url, method, **kwargs):
kwargs.setdefault('user_agent', USER_AGENT)
kwargs.setdefault('auth', self.auth)
if isinstance(self.endpoint_override, six.string_types):
kwargs.setdefault(
'endpoint_override',
_trim_endpoint_api_version(self.endpoint_override)
)
if getattr(self, 'os_ironic_api_version', None):
kwargs['headers'].setdefault('X-OpenStack-Ironic-API-Version',
self.os_ironic_api_version)
endpoint_filter = kwargs.setdefault('endpoint_filter', {})
endpoint_filter.setdefault('interface', self.interface)
endpoint_filter.setdefault('service_type', self.service_type)
endpoint_filter.setdefault('region_name', self.region_name)
resp = self.session.request(url, method,
raise_exc=False, **kwargs)
if resp.status_code == http_client.NOT_ACCEPTABLE:
negotiated_ver = self.negotiate_version(self.session, resp)
kwargs['headers']['X-OpenStack-Ironic-API-Version'] = (
negotiated_ver)
return self._http_request(url, method, **kwargs)
if resp.status_code >= http_client.BAD_REQUEST:
error_json = _extract_error_json(resp.content)
raise exc.from_response(resp, error_json.get('faultstring'),
error_json.get('debuginfo'), method, url)
elif resp.status_code in (http_client.MOVED_PERMANENTLY,
http_client.FOUND, http_client.USE_PROXY):
# Redirected. Reissue the request to the new location.
location = resp.headers.get('location')
resp = self._http_request(location, method, **kwargs)
elif resp.status_code == http_client.MULTIPLE_CHOICES:
raise exc.from_response(resp, method=method, url=url)
return resp
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'body' in kwargs:
kwargs['data'] = jsonutils.dump_as_bytes(kwargs.pop('body'))
resp = self._http_request(url, method, **kwargs)
body = resp.content
content_type = resp.headers.get('content-type', None)
status = resp.status_code
if (status in (http_client.NO_CONTENT, http_client.RESET_CONTENT) or
content_type is None):
return resp, list()
if 'application/json' in content_type:
try:
body = resp.json()
except ValueError:
LOG.error(_LE('Could not decode response body as JSON'))
else:
body = None
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
return self._http_request(url, method, **kwargs)
def _construct_http_client(endpoint=None,
session=None,
token=None,
auth_ref=None,
os_ironic_api_version=DEFAULT_VER,
api_version_select_state='default',
max_retries=DEFAULT_MAX_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
timeout=600,
ca_file=None,
cert_file=None,
key_file=None,
insecure=None,
**kwargs):
if session:
kwargs.setdefault('service_type', 'baremetal')
kwargs.setdefault('user_agent', 'python-ironicclient')
kwargs.setdefault('interface', kwargs.pop('endpoint_type', None))
kwargs.setdefault('endpoint_override', endpoint)
ignored = {'token': token,
'auth_ref': auth_ref,
'timeout': timeout != 600,
'ca_file': ca_file,
'cert_file': cert_file,
'key_file': key_file,
'insecure': insecure}
dvars = [k for k, v in ignored.items() if v]
if dvars:
LOG.warning(_LW('The following arguments are ignored when using '
'the session to construct a client: %s'),
', '.join(dvars))
return SessionClient(session=session,
os_ironic_api_version=os_ironic_api_version,
api_version_select_state=api_version_select_state,
max_retries=max_retries,
retry_interval=retry_interval,
endpoint=endpoint,
**kwargs)
else:
if kwargs:
LOG.warning(_LW('The following arguments are being ignored when '
'constructing the client: %s'), ', '.join(kwargs))
return HTTPClient(endpoint=endpoint,
token=token,
auth_ref=auth_ref,
os_ironic_api_version=os_ironic_api_version,
api_version_select_state=api_version_select_state,
max_retries=max_retries,
retry_interval=retry_interval,
timeout=timeout,
ca_file=ca_file,
cert_file=cert_file,
key_file=key_file,
insecure=insecure)
|
apache-2.0
| 5,864,973,488,052,471,000
| 39.873817
| 175
| 0.575095
| false
| 4.25657
| false
| false
| false
|
dpineo/gadann
|
gadann/layer.py
|
1
|
22760
|
#
# GADANN - GPU Accelerated Deep Artificial Neural Network
#
# Copyright (C) 2014 Daniel Pineo (daniel@pineo.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy
import operator
import scipy.signal  # used by NumpyConvLayer; a plain "import scipy" does not load scipy.signal
import logging
import cv2
import functools
import gzip  # used by Layer.save
import pickle
from . import cudnn
from . import kernels
from . import tensor
logger = logging.getLogger(__name__)
# ----------------------- Layer --------------------------
class Layer(object):
def __init__(self, **kwargs):
self.params = []
self.name = kwargs['name']
self.input_shape = kwargs['input_shape'] + (3-len(kwargs['input_shape']))*(1,)
self.input_size = functools.reduce(operator.__mul__, self.input_shape, 1)
self.updater = kwargs['updater']
pass
def fprop(self, input):
return input
def bprop(self, input, fprop_result=None):
return input
def gradient(self, input, output):
return {}
def update(self, grads):
self.updater.update(self.params, grads)
def show(self):
pass
def save(self):
# Save weights to file
fo = gzip.GzipFile(self.name + '_weights.gz', 'wb')
pickle.dump([param.get() for param in self.params], fo)
fo.close()
# ----------------------- LinearLayer --------------------------
class LinearLayer(Layer):
def __init__(self, **kwargs):
super(LinearLayer, self).__init__(**kwargs)
if 'stride' in kwargs:
self.stride = kwargs['stride']
else:
self.stride = (1, 1)
if 'padding' in kwargs:
self.padding = kwargs['padding']
else:
self.padding = (0, 0)
self.n_features = kwargs['n_features']
try:
self.shape = (self.n_features, self.input_shape[0]) + kwargs['shape'] + (2-len(kwargs['shape']))*(1,)
        except KeyError:
self.shape = (self.n_features,)+kwargs['input_shape']
self.output_shape = (self.shape[0],)+tuple([(x-y)//s+1+2*p for x, y, s, p in zip(self.input_shape[-2:], self.shape[-2:], self.stride, self.padding)])
init = kwargs.get('init', 0.01)
w = tensor.Tensor(init*numpy.random.randn(*self.shape))
v_bias = tensor.zeros((1, self.input_size))
h_bias = tensor.zeros((1, self.n_features))
self.params = {'w': w, 'v_bias': v_bias, 'h_bias': h_bias}
        # NOTE: returning here intentionally skips the cuDNN descriptor setup
        # below (cuDNN support is disabled; see the commented-out cudnn_context
        # at the bottom of this file).
        return
self.filter_descriptor = cudnn.cudnnCreateFilterDescriptor()
self.bias_descriptor = cudnn.cudnnCreateTensorDescriptor()
self.convolution_descriptor = cudnn.cudnnCreateConvolutionDescriptor()
self.input_descriptor = cudnn.cudnnCreateTensorDescriptor()
self.output_descriptor = cudnn.cudnnCreateTensorDescriptor()
cudnn.cudnnSetFilter4dDescriptor(
self.filter_descriptor,
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*w.shape
)
        logger.info('filter_descriptor: %s', cudnn.cudnnGetFilter4dDescriptor(self.filter_descriptor))
cudnn.cudnnSetTensor4dDescriptor(
self.bias_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*(h_bias.shape + (1,1))
)
def __str__(self):
return self.name + " - " + self.__class__.__name__ + "(shape=" + str(self.input_shape) + ")"
def show(self):
cv2.imshow(self.name, .1*self.params['w'].mosaic().get()+.5)
cv2.waitKey(1)
def fprop(self, input):
#if self.shape[1:] == input.shape[1:]:
if True:
return self.fprop_dense(input)
else:
return self.fprop_conv(input)
def bprop(self, input, fprop_result=None):
#if self.shape[0] == input.shape[1]:
if True:
return self.bprop_dense(input, fprop_result)
else:
return self.bprop_conv(input, fprop_result)
def gradient(self, input, output):
#if self.shape[1:] == input.size:
if True:
return self.gradient_dense(input, output)
else:
return self.gradient_conv(input, output)
def fprop_dense(self, input):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
result = input.dot(w.T()) + input.ones_vector.dot(h_bias)
assert not numpy.isnan(result.get()).any()
return result
def bprop_dense(self, input, fprop_result=None):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
result = input.dot(w) + input.ones_vector.dot(v_bias)
assert not numpy.isnan(result.get()).any()
return result
def fprop_conv(self, input):
assert len(input.shape) == 4
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
assert not numpy.isnan(w.get()).any()
assert not numpy.isnan(v_bias.get()).any()
assert not numpy.isnan(h_bias.get()).any()
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
        logger.info('input_descriptor: %s', cudnn.cudnnGetTensor4dDescriptor(self.input_descriptor))
cudnn.cudnnSetConvolution2dDescriptor(
self.convolution_descriptor,
self.padding[0], self.padding[1], self.stride[0], self.stride[1], 1, 1,
cudnn.cudnnConvolutionMode['CUDNN_CONVOLUTION'])
        logger.info('convolution_descriptor: %s', cudnn.cudnnGetConvolution2dDescriptor(self.convolution_descriptor))
# Get output dimensions (first two values are n_input and filters_out)
batch, channels, height_output, width_output = cudnn.cudnnGetConvolution2dForwardOutputDim(
self.convolution_descriptor,
self.input_descriptor,
self.filter_descriptor
)
# Output tensor
output = tensor.Tensor((batch, self.n_features, height_output, width_output))
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
        logger.info('output_descriptor: %s', cudnn.cudnnGetTensor4dDescriptor(self.output_descriptor))
workspace_size = cudnn.cudnnGetConvolutionForwardWorkspaceSize(
cudnn_context,
self.input_descriptor,
self.filter_descriptor,
self.convolution_descriptor,
self.output_descriptor,
cudnn.cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_PREFER_FASTEST'],
).value
workspace = tensor.Tensor((workspace_size,))
        logger.info('workspace_size: %s', workspace_size)
algo = cudnn.cudnnGetConvolutionForwardAlgorithm(
cudnn_context,
self.input_descriptor,
self.filter_descriptor,
self.convolution_descriptor,
self.output_descriptor,
cudnn.cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_PREFER_FASTEST'],
0
)
assert(not numpy.isnan(input.get()).any())
assert(not numpy.isnan(w.get()).any())
# Perform convolution
cudnn.cudnnConvolutionForward(
cudnn_context,
1,
self.input_descriptor,
input.data(),
self.filter_descriptor,
w.data(),
self.convolution_descriptor,
algo,
workspace.data(),
workspace_size,
0,
self.output_descriptor,
output.data()
)
assert( not numpy.isnan(output.get()).any())
cudnn.cudnnAddTensor(
cudnn_context,
cudnn.cudnnAddMode['CUDNN_ADD_SAME_C'],
1,
self.bias_descriptor,
h_bias.data(),
1,
self.output_descriptor,
output.data()
)
assert not numpy.isnan(output.get()).any()
return output
def bprop_conv(self, input, fprop_result=None):
assert len(input.shape) == 4
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
cudnn.cudnnSetConvolution2dDescriptor(
self.convolution_descriptor,
0, 0, 1, 1, 1, 1,
cudnn.cudnnConvolutionMode['CUDNN_CONVOLUTION'])
# Output tensor
output = tensor.Tensor((input.shape[0], w.shape[1], input.shape[2]+w.shape[2]-1, input.shape[3]+w.shape[3]-1))
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
# Perform convolution
cudnn.cudnnConvolutionBackwardData(
cudnn_context,
1,
self.filter_descriptor,
w.data(),
self.input_descriptor,
input.data(),
self.convolution_descriptor,
0,
self.output_descriptor,
output.data()
)
assert not numpy.isnan(output.get()).any()
return output
def gradient_dense(self, v, h):
return {
'w': h.T().dot(v),
'h_bias': h.T().dot(h.ones_vector)
        }
        # NOTE: the cuDNN code below this return is unreachable leftover from a
        # convolutional gradient path; see gradient_conv for the live version.
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*v.shape
)
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*(h.shape + (1,1))
)
w_grad = tensor.zeros(w.shape)
v_bias_grad = tensor.zeros(v_bias.shape)
h_bias_grad = tensor.zeros(h_bias.shape)
# Perform convolution
cudnn.cudnnConvolutionBackwardFilter(
cudnn_context,
1,
self.input_descriptor,
v.data(),
self.output_descriptor,
h.data(),
self.convolution_descriptor,
1,
self.filter_descriptor,
w_grad.data()
)
cudnn.cudnnConvolutionBackwardBias(
cudnn_context,
1,
self.output_descriptor,
h.data(),
1,
self.bias_descriptor,
h_bias_grad.data()
)
assert not numpy.isnan(w.get()).any()
assert not numpy.isnan(h_bias.get()).any()
return [w_grad, v_bias_grad, h_bias_grad]
'''
def gradient_dense(self, input, output):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
w_grad = tensor.zeros(w.shape)
v_bias_grad = tensor.zeros(v_bias.shape)
h_bias_grad = tensor.zeros(h_bias.shape)
tensor.sgemm(output.T(), input, w_grad, alpha=1, beta=0)
tensor.sgemv(h_bias_grad, output.T(), input.ones_vector.T(), alpha=1, beta=0)
assert not numpy.isnan(w_grad.get()).any()
assert not numpy.isnan(v_bias_grad.get()).any()
assert not numpy.isnan(h_bias_grad.get()).any()
return {'w': w_grad, 'v_bias': v_bias_grad, 'h_bias': h_bias_grad}
'''
def gradient_conv(self, input, output):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
w_grad = tensor.zeros(w.shape)
v_bias_grad = tensor.zeros(v_bias.shape)
h_bias_grad = tensor.zeros(h_bias.shape)
# Perform convolution
cudnn.cudnnConvolutionBackwardFilter(
cudnn_context,
1,
self.input_descriptor,
input.data(),
self.output_descriptor,
output.data(),
self.convolution_descriptor,
1,
self.filter_descriptor,
w_grad.data()
)
cudnn.cudnnConvolutionBackwardBias(
cudnn_context,
1,
self.output_descriptor,
output.data(),
1,
self.bias_descriptor,
h_bias_grad.data()
)
assert not numpy.isnan(w_grad.get()).any()
assert not numpy.isnan(h_bias_grad.get()).any()
return [w_grad, v_bias_grad, h_bias_grad]
# ----------------------- DenseLayer --------------------------
class DenseLayer(LinearLayer):
def __init__(self, **kwargs):
super(DenseLayer, self).__init__(**kwargs)
def __str__(self):
        return self.name + " - " + self.__class__.__name__ + "(shape=" + str(self.shape) + ")"
def show(self):
w = self.params['w']
if w.shape[1] not in (1, 3):
return
cv2.imshow(self.name, self.params['w'].mosaic().get()/10+.5)
cv2.moveWindow(self.name, 0, 0)
cv2.waitKey(1)
class ActivationLayer(Layer):
def __init__(self, activation, **kwargs):
super(ActivationLayer, self).__init__(**kwargs)
self.activation = activation
self.d_activation = getattr(kernels, 'd'+activation.__name__)
self.output_shape = self.input_shape
def fprop(self, input):
result = self.activation(input)
assert not numpy.isnan(result.get()).any()
return result
def bprop(self, input, fprop_result=None):
if fprop_result:
result = self.d_activation(input, fprop_result)
else:
f = self.activation(input)
result = (tensor.Tensor(numpy.ones_like(f.get()))-f) * f
assert not numpy.isnan(result.get()).any()
return result
def __str__(self):
return self.name + " - " + self.__class__.__name__ + "(activation='" + self.activation.__name__ + "')"
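# Note on the bprop fallback above: when no fprop_result is cached the layer
# recomputes f = activation(input) and returns (1 - f) * f, i.e. the derivative
# of the logistic sigmoid. That shortcut is only valid for sigmoid-like
# activations; other activations rely on the d<activation> kernels looked up in
# __init__.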
class DropoutLayer(Layer):
def __init__(self, **kwargs):
super(DropoutLayer, self).__init__(**kwargs)
self.p_exclude = float(kwargs.pop('prob'))
self.p_include = float(1-self.p_exclude)
self.output_shape = self.input_shape
def __str__(self):
return self.name + " - " + self.__class__.__name__ + "(p=" + str(self.p_exclude) + ")"
class MaxPoolingLayer(Layer):
def __init__(self, **kwargs):
super(MaxPoolingLayer, self).__init__(**kwargs)
if 'padding' not in kwargs:
kwargs['padding'] = (0, 0)
if 'stride' not in kwargs:
kwargs['stride'] = (1, 1)
self.shape = kwargs['shape']
self.padding = kwargs['padding']
self.stride = kwargs['stride']
self.output_shape = (self.input_shape[0], (self.input_shape[1]+2*self.padding[0])/self.stride[0], (self.input_shape[2]+2*self.padding[1])/self.stride[1])
self.pooling_descriptor = cudnn.cudnnCreatePoolingDescriptor()
self.input_descriptor = cudnn.cudnnCreateTensorDescriptor()
self.output_descriptor = cudnn.cudnnCreateTensorDescriptor()
cudnn.cudnnSetPooling2dDescriptor(
self.pooling_descriptor,
cudnn.cudnnPoolingMode['CUDNN_POOLING_MAX'],
self.shape[0],
self.shape[1],
self.padding[0],
self.padding[1],
self.stride[0],
self.stride[1]
)
pass
def fprop(self, input):
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
# Output tensor
output = tensor.Tensor((input.shape[0], input.shape[1], input.shape[2]/self.stride[0], input.shape[3]/self.stride[1]))
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
cudnn.cudnnPoolingForward(
cudnn_context,
self.pooling_descriptor,
1,
self.input_descriptor,
input.data(),
0,
self.output_descriptor,
output.data()
)
return output
def __str__(self):
        return self.name + " - " + self.__class__.__name__ + "(shape=" + str(self.shape) + ", stride=" + str(self.stride) + ")"
class ReshapeLayer(Layer):
def __init__(self, **kwargs):
super(ReshapeLayer, self).__init__(**kwargs)
self.input_shape = kwargs['input_shape']
self.output_shape = kwargs['output_shape']
        assert(functools.reduce(operator.__mul__, self.input_shape, 1) == functools.reduce(operator.__mul__, self.output_shape, 1))
def fprop(self, input):
assert input.shape[1:] == self.input_shape
input.shape = (input.shape[0],) + self.output_shape
return input
def bprop(self, input, fprop_result=None):
assert input.shape[1:] == self.output_shape
input.shape = (input.shape[0],) + self.input_shape
return input
# ----------------------- ConvLayer --------------------------
class GadannConvLayer(LinearLayer):
def __init__(self, **kwargs):
        super(GadannConvLayer, self).__init__(**kwargs)
# self.conv_step = kwargs['conv_step']
# self.w.axes = ['features_out', 'features_in', 'height', 'width']
def fprop(self, input):
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
        result = tensor.Tensor((input.shape[0], w.shape[0]) + tuple([x - y for x, y in zip(input.shape[-2:], w.shape[-2:])]))
        grid = (result.shape[-2]/16, result.shape[-1]/16, result.shape[0])
        kernels.conv2d_16x16_kernel(input.gpuarray, w.gpuarray, h_bias.gpuarray, result.gpuarray, numpy.uint32(w.shape[1]), numpy.uint32(w.shape[0]), block=(16,16,1), grid=grid)
assert not numpy.isnan(result.get()).any()
return result
def bprop(self, input, fprop_result=None):
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
        result = tensor.zeros((input.shape[0], w.shape[1]) + tuple([x + y for x, y in zip(input.shape[-2:], w.shape[-2:])]))
        grid = (input.shape[3]/16, input.shape[2]/16, 1)
        kernels.bconv2d_16x16_kernel(input.gpuarray, w.gpuarray, v_bias.gpuarray, result.gpuarray, numpy.uint32(w.shape[0]), numpy.uint32(w.shape[1]), block=(16,16,1), grid=grid)
assert not numpy.isnan(result.get()).any()
return result
def gradient(self, v, h):
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
w_update = tensor.zeros(w.shape)
v_bias_update = tensor.zeros(v_bias.shape)
h_bias_update = tensor.zeros(h_bias.shape)
grid = (h.shape[-2]/16,h.shape[-1]/16) # revisit: grid rounds down to nearest 16
kernels.iconv2d_16x16_kernel(v.gpuarray, h.gpuarray, w_update.gpuarray, numpy.uint32(w_update.shape[1]), numpy.uint32(w_update.shape[0]), block=(16,16,1), grid=grid)
        kernels.iconv2d_h_bias_16x16_naive_kernel(h.gpuarray, h_bias_update.gpuarray, numpy.uint32(functools.reduce(operator.__mul__, h.shape[-2:], 1)), block=(w_update.shape[0],1,1), grid=grid)
v_bias_block = w.shape[-2:] + (1,)
v_bias_grid = (1,1,1)
kernels.iconv2d_v_bias_16x16_naive_kernel(v.gpuarray, v_bias_update.gpuarray, numpy.uint32(v.shape[1]-16), numpy.uint32(v.shape[2]-16), block=v_bias_block, grid=v_bias_grid)
assert not numpy.isnan(w_update.get()).any()
assert not numpy.isnan(v_bias_update.get()).any()
assert not numpy.isnan(h_bias_update.get()).any()
return [w_update, v_bias_update, h_bias_update]
# ----------------------- NumpyConvLayer --------------------------
class NumpyConvLayer(LinearLayer):
    def fprop(self, input):
        # CPU reference implementation: convolve the input with each filter via scipy.
        w = self.params['w']
        result = []
        for n in range(w.shape[0]):
            result.append(scipy.signal.convolve(input.get()[0,...], w.get()[n,...], mode='valid'))
        return tensor.Tensor(numpy.ascontiguousarray(numpy.vstack(result)[numpy.newaxis,...]))
    def bprop(self, input, fprop_result=None):
        # Backward pass: flip each filter, full-convolve, then average over features.
        w = self.params['w']
        result = []
        for n in range(w.shape[0]):
            flipped = numpy.fliplr(numpy.flipud(w.get()[n,...]))
            result.append(scipy.signal.convolve(input.get()[n,...], flipped, mode='full'))
        result = numpy.vstack(result).mean(axis=0)
        return tensor.Tensor(result)
# TODO: disabled for Fermi
#cudnn_context = cudnn.cudnnCreate()
|
mit
| 3,916,060,930,377,858,000
| 34.532905
| 184
| 0.564587
| false
| 3.750824
| false
| false
| false
|
denfromufa/mipt-course
|
pysandbox/torrent_dht_demo_test.py
|
1
|
9817
|
# Copyright (c) 2012 Timur Iskhodzhanov and MIPT students. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import logging
import os
import random
import re
import sys
import Queue # Renamed to queue in 3.0
import unittest
class FakeDHT(object):
# This is a fake DHT, see http://en.wikipedia.org/wiki/Distributed_hash_table
def __init__(self):
self.__table = {}
# O(log N)
#
# TODO(timurrrr): in fact, we don't need a precise set for a given key,
# a non-empty subset of the most recent machines would work.
# This can also automatically fix the abscence of Remove()
def Append(self, key, new_values):
# will raise exception if 'values' is not a sequence
logging.debug("FakeDHT.Append('%s', '%s')", key, new_values)
if key not in self.__table.keys():
self.__table[key] = new_values
else:
self.__table[key].extend(new_values)
# O(log N)
def Get(self, key):
ret = self.__table[key]
logging.debug("FakeDHT.Get('%s') returns '%s'", key, ret)
return ret
class FakeP2PNetwork(object):
def __init__(self):
# will contain (key -> (receive queue) pairs
# where receive queue holds list of (sender_id, message) pairs.
self.__nodes = {}
self.__availableIDs = set(["Alice", "Brian", "Colin", "David", "Ellie"])
# Returns new node ID
def CreateNode(self):
new_id = random.choice(list(self.__availableIDs))
self.__availableIDs.remove(new_id)
self.__nodes[new_id] = Queue.Queue()
logging.info("New node: %s" % new_id)
return new_id
# Returns True on success, False on failure
# Design flaw: we can use other node's ID as sender_id
# TODO(timurrrr): FakeSocket
def Send(self, sender_id, to_id, message):
assert sender_id in self.__nodes.keys()
if to_id not in self.__nodes.keys():
logging.error("'%s' node is unknown" % to_id)
return False
self.__nodes[to_id].put((sender_id, copy.deepcopy(message)))
return True
# Returns (from, message) pair if present (FIFO), None if no messages are
# available.
# Design flaw: we can use other node's ID as receiver_id
def Receive(self, receiver_id):
if self.__nodes[receiver_id].empty():
return None
return self.__nodes[receiver_id].get()
class TorrentID(object):
CHUNK_SIZE = 4
def __init__(self, contents):
# contents should be a string
self.length = len(contents)
self.filehash = hashlib.sha1(contents).hexdigest()
self.partial_hashes = []
for chunk_id in range(self.length / TorrentID.CHUNK_SIZE + 1):
ph = self.__CalculatePartialHash(contents, chunk_id)
if ph != None:
self.partial_hashes.append(ph)
logging.info("Created torrent:\nContents='%s'\n%s" % (contents, str(self)))
# implicitly called by str(torrent_id)
def __str__(self):
return "Torrent:\n hash=%s\n size=%d\n partial_hashes=[\n %s\n ]\n" % (
self.filehash, self.length, ",\n ".join(self.partial_hashes))
@staticmethod
def GetChunkData(contents, chunk_id):
return contents[chunk_id * TorrentID.CHUNK_SIZE :
(chunk_id+1) * TorrentID.CHUNK_SIZE]
@staticmethod
def SetChunkData(contents, chunk_id, new_data):
idx_l = chunk_id * TorrentID.CHUNK_SIZE
idx_r = idx_l + TorrentID.CHUNK_SIZE
return contents[:idx_l] + new_data + contents[idx_r:]
@staticmethod
def GetChunkHash(chunk_data):
return hashlib.sha1(chunk_data).hexdigest()
@staticmethod
def __CalculatePartialHash(contents, chunk_id):
chunk = TorrentID.GetChunkData(contents, chunk_id)
if len(chunk) > 0:
return TorrentID.GetChunkHash(chunk)
return None
def IsKnownChunk(self, contents, chunk_id):
return self.__CalculatePartialHash(contents, chunk_id) == (
self.partial_hashes[chunk_id])
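# Worked example (CHUNK_SIZE == 4, data illustrative): for contents
# "AAAABBBBCC" the torrent has length 10 and three chunks -- "AAAA", "BBBB"
# and "CC" -- so partial_hashes holds three SHA-1 digests.
# GetChunkData("AAAABBBBCC", 1) returns "BBBB" and
# SetChunkData("AAAABBBBCC", 1, "XXXX") returns "AAAAXXXXCC".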
class TorrentClient(object):
def __init__(self, network, dht):
self.__network = network
self.__id = network.CreateNode()
# Will store (<chunk>_key -> set(machines which know <chunk>)
self.__dht = dht
# Torrent hash -> (torrent, contents) dictionary.
# 'contents' should contain '*' for unknown bytes (TODO: this is a hack)
self.__data = {}
# Torrents to be downloaded
# (torrent hash -> set of missing chunk indices)
self.__partial_torrents = {}
# List of finished-but-not-taken torrent hashes
self.__downloaded = []
def GetDownloadedTorrent(self):
# Peek and return any downloaded torrent as a (torrent, contents) tuple
# (if present), otherwise None.
if len(self.__downloaded) == 0:
return None
ret_hash = self.__downloaded[-1]
self.__downloaded = self.__downloaded[:-1]
return self.__data[ret_hash]
@staticmethod
def __ChunkKey(torrent, chunk_id):
assert chunk_id in range(len(torrent.partial_hashes))
return "chunk_%s_%d" % (torrent.filehash, chunk_id)
def AddTorrent(self, torrent, known_contents=None):
assert torrent.filehash not in self.__data.keys()
if known_contents:
for chunk in range(len(torrent.partial_hashes)):
self.__dht.Append(self.__ChunkKey(torrent, chunk), [self.__id])
print "%s: Loaded torrent '%s'" % (self.__id, known_contents)
else:
known_contents = "*" * torrent.length
self.__partial_torrents[torrent.filehash] = (
set(range(len(torrent.partial_hashes))))
self.__data[torrent.filehash] = (torrent, known_contents)
def Tick(self):
message = self.__network.Receive(self.__id)
if message:
self.__ProcessMessage(message)
if len(self.__partial_torrents.keys()) > 0:
# Select random a torrent to download a chunk
filehash = random.choice(self.__partial_torrents.keys())
torrent = self.__data[filehash][0]
# ... random chunk
needed_chunks = self.__partial_torrents[filehash]
chunk = random.choice(list(needed_chunks))
chunk_key = self.__ChunkKey(torrent, chunk)
# ... random host
chunk_available_at = random.choice(self.__dht.Get(chunk_key))
# Ask the host to send the chunk of that torrent
self.__network.Send(self.__id, chunk_available_at, "give_" + chunk_key)
def __ProcessMessage(self, msg):
(from_id, contents) = msg
logging.debug("Node '%s' received a message '%s' from '%s'",
self.__id, contents, from_id)
m = re.match("give_chunk_([0-9a-f]+)_([0-9]+)", contents)
if m:
# Process "give_chunk_<hash>_<chunk>" message
(filehash, chunk_id) = m.groups()
if filehash not in self.__data.keys():
logging.error("Hey, I don't know such a torrent!")
return
chunk_id = int(chunk_id)
(torrent, contents) = self.__data[filehash]
if not torrent.IsKnownChunk(contents, chunk_id):
logging.error("Hey, I don't have this chunk!")
logging.info("Current torrent contents are:\n '%s'" % contents)
return
chunk_key = self.__ChunkKey(torrent, chunk_id)
chunk_data = TorrentID.GetChunkData(contents, chunk_id)
self.__network.Send(self.__id, from_id,
"take_%s %s" % (chunk_key, chunk_data))
return
m = re.match("take_chunk_([0-9a-f]+)_([0-9]+) (.*)", contents)
if not m:
logging.error("Couldn't parse this message '%s'", msg)
return
# Process "take_chunk_<hash>_<chunk> <contents>" message
(filehash, chunk_id, chunk_data) = m.groups()
chunk_id = int(chunk_id)
if filehash not in self.__partial_torrents.keys():
logging.info("Hey, I didn't want this torrent!")
return
needed_chunks = self.__partial_torrents[filehash]
(torrent, known_contents) = self.__data[filehash]
if chunk_id not in needed_chunks:
logging.warning("%s: Hey, I didn't want this chunk! %d not in %s" % (
self.__id, chunk_id, str(needed_chunks)))
logging.warning("Current torrent contents are:\n '%s'" % known_contents)
return
if torrent.GetChunkHash(chunk_data) != torrent.partial_hashes[chunk_id]:
logging.error("Hash mismatch!") # Call security?
return
known_contents = torrent.SetChunkData(known_contents, chunk_id, chunk_data)
self.__data[filehash] = (torrent, known_contents)
print "%s: New contents are '%s'" % (self.__id, known_contents)
needed_chunks.remove(chunk_id)
chunk_key = self.__ChunkKey(torrent, chunk_id)
self.__dht.Append(chunk_key, [self.__id])
if len(needed_chunks) == 0:
logging.info("Torrent #%s download finished!" % filehash)
self.__downloaded.append(filehash)
self.__partial_torrents.pop(filehash)
return
class TorrentDhtDemoTest(unittest.TestCase):
def runTest(self):
print # ugly hack to force a newline
myfile = "AAAABBBBCCCCDDDDEEEEFF"
mytorrent = TorrentID(myfile)
network = FakeP2PNetwork()
dht = FakeDHT()
clients = []
for i in range(3):
clients.append(TorrentClient(network, dht))
if i == 0: # Seeder
clients[i].AddTorrent(mytorrent, myfile)
else: # others
clients[i].AddTorrent(mytorrent)
received_file = None
while not received_file:
for c in clients:
c.Tick() # Simulate parallel execution
received_file = clients[1].GetDownloadedTorrent()
self.assertEqual(received_file[0], mytorrent)
self.assertEqual(received_file[1], myfile)
# Run the test suite.
if __name__ == '__main__':
# replace ERROR with INFO, DEBUG, etc. and re-run. Notice any changes?
logging.basicConfig(stream=sys.stdout,
level=logging.ERROR, # Don't print anything less serious
format="%(asctime)s [%(levelname)s] %(message)s")
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
bsd-3-clause
| 1,982,451,436,412,792,600
| 33.205575
| 81
| 0.640522
| false
| 3.506071
| true
| false
| false
|
GoogleCloudPlatform/datacatalog-connectors-bi
|
google-datacatalog-sisense-connector/src/google/datacatalog_connectors/sisense/prepare/datacatalog_tag_template_factory.py
|
1
|
12677
|
#!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import datacatalog
from google.cloud.datacatalog import TagTemplate
from google.datacatalog_connectors.commons import prepare
from google.datacatalog_connectors.sisense.prepare import constants
class DataCatalogTagTemplateFactory(prepare.BaseTagTemplateFactory):
__BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
__DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
__STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
__TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP
def __init__(self, project_id: str, location_id: str):
self.__project_id = project_id
self.__location_id = location_id
def make_tag_template_for_dashboard(self) -> TagTemplate:
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_DASHBOARD)
tag_template.display_name = 'Sisense Dashboard Metadata'
self._add_primitive_type_field(tag_template=tag_template,
field_id='id',
field_type=self.__STRING_TYPE,
display_name='Id',
is_required=True,
order=10)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_username',
field_type=self.__STRING_TYPE,
display_name='Owner username',
order=9)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_name',
field_type=self.__STRING_TYPE,
display_name='Owner name',
order=8)
self._add_primitive_type_field(tag_template=tag_template,
field_id='folder_id',
field_type=self.__STRING_TYPE,
display_name='Folder Id',
order=7)
self._add_primitive_type_field(tag_template=tag_template,
field_id='folder_name',
field_type=self.__STRING_TYPE,
display_name='Folder Name',
order=6)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='folder_entry',
field_type=self.__STRING_TYPE,
display_name='Data Catalog Entry for the Folder',
order=5)
self._add_primitive_type_field(tag_template=tag_template,
field_id='datasource',
field_type=self.__STRING_TYPE,
display_name='Data Source',
order=4)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='last_publish',
field_type=self.__TIMESTAMP_TYPE,
display_name='Time it was last published',
order=3)
self._add_primitive_type_field(tag_template=tag_template,
field_id='last_opened',
field_type=self.__TIMESTAMP_TYPE,
display_name='Time it was last opened',
order=2)
self._add_primitive_type_field(tag_template=tag_template,
field_id='server_url',
field_type=self.__STRING_TYPE,
display_name='Sisense Server Url',
is_required=True,
order=1)
return tag_template
def make_tag_template_for_folder(self) -> TagTemplate:
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_FOLDER)
tag_template.display_name = 'Sisense Folder Metadata'
self._add_primitive_type_field(tag_template=tag_template,
field_id='id',
field_type=self.__STRING_TYPE,
display_name='Id',
is_required=True,
order=11)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_username',
field_type=self.__STRING_TYPE,
display_name='Owner username',
order=10)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_name',
field_type=self.__STRING_TYPE,
display_name='Owner name',
order=9)
self._add_primitive_type_field(tag_template=tag_template,
field_id='parent_id',
field_type=self.__STRING_TYPE,
display_name='Id of Parent',
order=8)
self._add_primitive_type_field(tag_template=tag_template,
field_id='parent_name',
field_type=self.__STRING_TYPE,
display_name='Parent Folder',
order=7)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='parent_folder_entry',
field_type=self.__STRING_TYPE,
display_name='Data Catalog Entry for the parent Folder',
order=6)
self._add_primitive_type_field(tag_template=tag_template,
field_id='has_children',
field_type=self.__BOOL_TYPE,
display_name='Has children',
is_required=True,
order=5)
self._add_primitive_type_field(tag_template=tag_template,
field_id='child_count',
field_type=self.__DOUBLE_TYPE,
display_name='Child count',
order=4)
self._add_primitive_type_field(tag_template=tag_template,
field_id='has_dashboards',
field_type=self.__BOOL_TYPE,
display_name='Has dashboards',
is_required=True,
order=3)
self._add_primitive_type_field(tag_template=tag_template,
field_id='dashboard_count',
field_type=self.__DOUBLE_TYPE,
display_name='Dashboard count',
order=2)
self._add_primitive_type_field(tag_template=tag_template,
field_id='server_url',
field_type=self.__STRING_TYPE,
display_name='Sisense Server Url',
is_required=True,
order=1)
return tag_template
def make_tag_template_for_widget(self) -> TagTemplate:
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_WIDGET)
tag_template.display_name = 'Sisense Widget Metadata'
self._add_primitive_type_field(tag_template=tag_template,
field_id='id',
field_type=self.__STRING_TYPE,
display_name='Id',
is_required=True,
order=10)
self._add_primitive_type_field(tag_template=tag_template,
field_id='type',
field_type=self.__STRING_TYPE,
display_name='Type',
is_required=True,
order=9)
self._add_primitive_type_field(tag_template=tag_template,
field_id='subtype',
field_type=self.__STRING_TYPE,
display_name='Subtype',
order=8)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_username',
field_type=self.__STRING_TYPE,
display_name='Owner username',
order=7)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_name',
field_type=self.__STRING_TYPE,
display_name='Owner name',
order=6)
self._add_primitive_type_field(tag_template=tag_template,
field_id='dashboard_id',
field_type=self.__STRING_TYPE,
display_name='Dashboard Id',
is_required=True,
order=5)
self._add_primitive_type_field(tag_template=tag_template,
field_id='dashboard_title',
field_type=self.__STRING_TYPE,
display_name='Dashboard Title',
is_required=True,
order=4)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='dashboard_entry',
field_type=self.__STRING_TYPE,
display_name='Data Catalog Entry for the Dashboard',
is_required=True,
order=3)
self._add_primitive_type_field(tag_template=tag_template,
field_id='datasource',
field_type=self.__STRING_TYPE,
display_name='Data Source',
order=2)
self._add_primitive_type_field(tag_template=tag_template,
field_id='server_url',
field_type=self.__STRING_TYPE,
display_name='Sisense Server Url',
is_required=True,
order=1)
return tag_template
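# Minimal usage sketch (project and location IDs are placeholders): the factory
# only assembles TagTemplate protos locally and makes no Data Catalog API call.
if __name__ == '__main__':
    factory = DataCatalogTagTemplateFactory('my-project', 'us-central1')
    template = factory.make_tag_template_for_dashboard()
    print(template.display_name)  # 'Sisense Dashboard Metadata'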
|
apache-2.0
| 1,877,983,408,668,116,500
| 45.951852
| 78
| 0.441587
| false
| 5.403666
| false
| false
| false
|
schettino72/serveronduty
|
websod/database.py
|
1
|
1071
|
import os
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
metadata = MetaData()
def get_sa_db_uri(driver='', username='', password='', host='', port='', database=''):
"""get SQLAlchemy DB URI: driver://username:password@host:port/database"""
assert driver
if driver == 'sqlite':
# get absolute file path
if not database.startswith('/'):
db_file = os.path.abspath(database)
else:
db_file = database
db_uri = '%s:///%s' % (driver, db_file)
else:
db_uri = ('%s://%s:%s@%s:%s/%s' %
(driver, username, password, host, port, database))
return db_uri
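# Examples of the URIs produced above (paths illustrative):
#   get_sa_db_uri(driver='sqlite', database='sod.db')
#   -> 'sqlite:////absolute/path/to/sod.db'
#   get_sa_db_uri(driver='postgresql', username='sod', password='pw',
#                 host='localhost', port='5432', database='sod')
#   -> 'postgresql://sod:pw@localhost:5432/sod'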
class DB(object):
def __init__(self, db_uri):
self.engine = create_engine(db_uri, convert_unicode=True)
self.session = scoped_session(
sessionmaker(autocommit=False,
autoflush=False,
bind=self.engine))
def init_database(self):
metadata.create_all(bind=self.engine)
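# Minimal wiring sketch (file name illustrative), guarded so importing this
# module stays side-effect free:
if __name__ == '__main__':
    db = DB(get_sa_db_uri(driver='sqlite', database='websod_demo.db'))
    db.init_database()  # creates all tables registered on `metadata`
    print(db.engine.url)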
|
mit
| 8,379,561,955,119,394,000
| 27.184211
| 86
| 0.577031
| false
| 4.026316
| false
| false
| false
|
blinkseb/script.tv.betaseries
|
json.py
|
1
|
10173
|
import string
import types
## json.py implements a JSON (http://json.org) reader and writer.
## Copyright (C) 2005 Patrick D. Logan
## Contact mailto:patrickdlogan@stardecisions.com
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class _StringGenerator(object):
def __init__(self, string):
self.string = string
self.index = -1
def peek(self):
i = self.index + 1
if i < len(self.string):
return self.string[i]
else:
return None
def next(self):
self.index += 1
if self.index < len(self.string):
return self.string[self.index]
else:
raise StopIteration
def all(self):
return self.string
class WriteException(Exception):
pass
class ReadException(Exception):
pass
class JsonReader(object):
    hex_digits = {'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}
    escapes = {'t': '\t', 'n': '\n', 'f': '\f', 'r': '\r', 'b': '\b'}
def read(self, s):
self._generator = _StringGenerator(s)
result = self._read()
return result
def _read(self):
self._eatWhitespace()
peek = self._peek()
if peek is None:
raise ReadException, "Nothing to read: '%s'" % self._generator.all()
if peek == '{':
return self._readObject()
elif peek == '[':
return self._readArray()
elif peek == '"':
return self._readString()
elif peek == '-' or peek.isdigit():
return self._readNumber()
elif peek == 't':
return self._readTrue()
elif peek == 'f':
return self._readFalse()
elif peek == 'n':
return self._readNull()
elif peek == '/':
self._readComment()
return self._read()
else:
raise ReadException, "Input is not valid JSON: '%s'" % self._generator.all()
def _readTrue(self):
self._assertNext('t', "true")
self._assertNext('r', "true")
self._assertNext('u', "true")
self._assertNext('e', "true")
return True
def _readFalse(self):
self._assertNext('f', "false")
self._assertNext('a', "false")
self._assertNext('l', "false")
self._assertNext('s', "false")
self._assertNext('e', "false")
return False
def _readNull(self):
self._assertNext('n', "null")
self._assertNext('u', "null")
self._assertNext('l', "null")
self._assertNext('l', "null")
return None
def _assertNext(self, ch, target):
if self._next() != ch:
raise ReadException, "Trying to read %s: '%s'" % (target, self._generator.all())
def _readNumber(self):
isfloat = False
result = self._next()
peek = self._peek()
while peek is not None and (peek.isdigit() or peek == "."):
isfloat = isfloat or peek == "."
result = result + self._next()
peek = self._peek()
try:
if isfloat:
return float(result)
else:
return int(result)
except ValueError:
raise ReadException, "Not a valid JSON number: '%s'" % result
def _readString(self):
result = ""
self._next()
try:
while self._peek() != '"':
ch = self._next()
if ch == "\\":
ch = self._next()
if ch in 'brnft':
ch = self.escapes[ch]
elif ch == "u":
ch4096 = self._next()
ch256 = self._next()
ch16 = self._next()
ch1 = self._next()
n = 4096 * self._hexDigitToInt(ch4096)
n += 256 * self._hexDigitToInt(ch256)
n += 16 * self._hexDigitToInt(ch16)
n += self._hexDigitToInt(ch1)
ch = unichr(n)
elif ch not in '"/\\':
raise ReadException, "Not a valid escaped JSON character: '%s' in %s" % (ch, self._generator.all())
result = result + ch
except StopIteration:
raise ReadException, "Not a valid JSON string: '%s'" % self._generator.all()
self._next()
return result
def _hexDigitToInt(self, ch):
try:
result = self.hex_digits[ch.upper()]
except KeyError:
try:
result = int(ch)
except ValueError:
raise ReadException, "The character %s is not a hex digit." % ch
return result
def _readComment(self):
self._next()
second = self._next()
if second == "/":
self._readDoubleSolidusComment()
elif second == '*':
self._readCStyleComment()
else:
raise ReadException, "Not a valid JSON comment: %s" % self._generator.all()
def _readCStyleComment(self):
try:
done = False
while not done:
ch = self._next()
done = (ch == "*" and self._peek() == "/")
if not done and ch == "/" and self._peek() == "*":
raise ReadException, "Not a valid JSON comment: %s, '/*' cannot be embedded in the comment." % self._generator.all()
self._next()
except StopIteration:
raise ReadException, "Not a valid JSON comment: %s, expected */" % self._generator.all()
def _readDoubleSolidusComment(self):
try:
ch = self._next()
while ch != "\r" and ch != "\n":
ch = self._next()
except StopIteration:
pass
def _readArray(self):
result = []
self._next()
done = self._peek() == ']'
while not done:
item = self._read()
result.append(item)
self._eatWhitespace()
done = self._peek() == ']'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
self._next()
return result
def _readObject(self):
result = {}
a = self._next()
assert a == '{'
done = self._peek() == '}'
while not done:
key = self._read()
if type(key) is not types.StringType:
raise ReadException, "Not a valid JSON object key (should be a string): %s" % key
self._eatWhitespace()
ch = self._next()
if ch != ":":
raise ReadException, "Not a valid JSON object: '%s' due to: '%s'" % (self._generator.all(), ch)
self._eatWhitespace()
val = self._read()
result[key] = val
self._eatWhitespace()
done = self._peek() == '}'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
self._next()
return result
def _eatWhitespace(self):
p = self._peek()
while p is not None and p in string.whitespace or p == '/':
if p == '/':
self._readComment()
else:
self._next()
p = self._peek()
def _peek(self):
return self._generator.peek()
def _next(self):
return self._generator.next()
class JsonWriter(object):
def _append(self, s):
self._results.append(s)
def write(self, obj, escaped_forward_slash=False):
self._escaped_forward_slash = escaped_forward_slash
self._results = []
self._write(obj)
return "".join(self._results)
def _write(self, obj):
ty = type(obj)
if ty is types.DictType:
n = len(obj)
self._append("{")
for k, v in obj.items():
self._write(k)
self._append(":")
self._write(v)
n = n - 1
if n > 0:
self._append(",")
self._append("}")
elif ty is types.ListType or ty is types.TupleType:
n = len(obj)
self._append("[")
for item in obj:
self._write(item)
n = n - 1
if n > 0:
self._append(",")
self._append("]")
elif ty is types.StringType or ty is types.UnicodeType:
self._append('"')
obj = obj.replace('\\', r'\\')
if self._escaped_forward_slash:
obj = obj.replace('/', r'\/')
obj = obj.replace('"', r'\"')
obj = obj.replace('\b', r'\b')
obj = obj.replace('\f', r'\f')
obj = obj.replace('\n', r'\n')
obj = obj.replace('\r', r'\r')
obj = obj.replace('\t', r'\t')
self._append(obj)
self._append('"')
elif ty is types.IntType or ty is types.LongType:
self._append(str(obj))
elif ty is types.FloatType:
self._append("%f" % obj)
elif obj is True:
self._append("true")
elif obj is False:
self._append("false")
elif obj is None:
self._append("null")
else:
raise WriteException, "Cannot write in JSON: %s" % repr(obj)
def write(obj, escaped_forward_slash=False):
return JsonWriter().write(obj, escaped_forward_slash)
def read(s):
return JsonReader().read(s)
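# Minimal round-trip sketch (illustrative only; like the rest of this module
# it targets Python 2, where the `raise X, msg` syntax used above is valid):
if __name__ == '__main__':
    encoded = write({"widget": [1, 2.5, True, None, "text"]})
    decoded = read(encoded)
    assert decoded["widget"][0] == 1 and decoded["widget"][3] is None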
|
gpl-2.0
| -2,107,853,004,309,323,000
| 31.710611
| 136
| 0.505751
| false
| 3.992543
| false
| false
| false
|
datamade/rlr
|
rlr/crossvalidation.py
|
1
|
4227
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
from builtins import range
import numpy
import logging
import warnings
import collections
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def gridSearch(examples,
labels,
learner,
num_cores,
k=3,
search_space=[.00001, .0001, .001, .01, .1, 1],
randomize=True):
if num_cores < 2 :
from multiprocessing.dummy import Pool
else :
from .backport import Pool
repeats = max(1, int(150/len(labels)))
pool = Pool()
logger.info('using cross validation to find optimum alpha...')
alpha_tester = AlphaTester(learner)
alpha_scores = collections.defaultdict(list)
for repeat in range(repeats):
permutation = numpy.random.permutation(labels.size)
examples = examples[permutation]
labels = labels[permutation]
labeled_examples = (examples, labels)
for alpha in search_space:
score_jobs = [pool.apply_async(alpha_tester,
(subset, validation, alpha))
for subset, validation in
kFolds(labeled_examples, k)]
scores = [job.get() for job in score_jobs]
alpha_scores[alpha].extend(scores)
best_alpha, score = max(alpha_scores.items(),
key=lambda x: reduceScores(x[1]))
logger.info('optimum alpha: %f, score %s' % (best_alpha, reduceScores(score)))
pool.close()
pool.join()
return best_alpha
# http://code.activestate.com/recipes/521906-k-fold-cross-validation-partition/
def kFolds(labeled_examples, k):
examples, labels = labeled_examples
if k < 2 :
raise ValueError("Number of folds must be at least 2")
if len(labels) < 2 :
raise ValueError("At least two training datum are required")
for i in range(k):
selected_indices = range(i, examples.shape[0], k)
validation = (examples[selected_indices, :],
labels[selected_indices])
training = (numpy.delete(examples, selected_indices, axis=0),
numpy.delete(labels, selected_indices))
if len(training[1]) and len(validation[1]) :
yield (training, validation)
else :
warnings.warn("Only providing %s folds out of %s requested" %
(i, k))
break
class AlphaTester(object) :
def __init__(self, learner) : # pragma : no cover
self.learner = learner
def __call__(self, training, validation, alpha) :
training_examples, training_labels = training
self.learner.alpha = alpha
self.learner.fit_alpha(training_examples, training_labels, None)
validation_examples, validation_labels = validation
predictions = self.learner.predict_proba(validation_examples)
return scorePredictions(validation_labels, predictions)
def scorePredictions(true_labels, predictions) :
# http://en.wikipedia.org/wiki/Matthews_correlation_coefficient
true_dupes = int(numpy.sum(predictions[true_labels == 1] > 0.5))
false_dupes = int(numpy.sum(predictions[true_labels == 0] > 0.5))
true_distinct = int(numpy.sum(predictions[true_labels == 0] <= 0.5))
false_distinct = int(numpy.sum(predictions[true_labels == 1] <= 0.5))
if not (true_dupes + false_dupes) * (true_distinct + false_distinct) :
return 0
matthews_cc = ((true_dupes * true_distinct
- false_dupes * false_distinct)
/numpy.sqrt((true_dupes + false_dupes)
* (true_dupes + false_distinct)
* (true_distinct + false_dupes)
* (true_distinct + false_distinct)))
return matthews_cc
def reduceScores(scores) :
scores = [score for score in scores
if score is not None and not numpy.isnan(score)]
if scores :
average_score = sum(scores)/len(scores)
else :
average_score = 0
return average_score
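# Minimal sketch of the scoring helpers (illustrative only; the toy labels and
# predictions below are made up for demonstration):
if __name__ == '__main__':
    toy_labels = numpy.array([1, 1, 0, 0])
    toy_predictions = numpy.array([0.9, 0.2, 0.1, 0.4])
    # Matthews correlation of the thresholded predictions against the labels
    print(scorePredictions(toy_labels, toy_predictions))
    # reduceScores averages the usable scores, ignoring None and NaN entries
    print(reduceScores([0.5, None, float('nan'), 1.0]))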
|
bsd-3-clause
| 3,518,832,934,487,426,000
| 29.410072
| 82
| 0.588597
| false
| 4.131965
| false
| false
| false
|
podbregarb/prvi-projekt
|
prop.py
|
1
|
48496
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
# Compatibility for Python 2 and Python 3
try:
basestring
except NameError:
basestring = str
# Should the lists of conjuncts and disjuncts be sorted?
# Set to list to leave them unsorted
# Set to sorted to sort them
sortSet = sorted
def paren(s, level, expl):
    """Put parentheses around an expression.
    Returns the string s when level <= expl, and s surrounded by parentheses
    otherwise.
    Arguments:
    s -- string to output
    level -- level at which parentheses are placed
    expl -- smallest value of level at which parentheses are added
"""
return s if level <= expl else '('+s+')'
def isLiteral(s):
    """Determine whether s is a string representing a logical variable.
    Argument:
    s -- variable name
"""
return isinstance(s, basestring) and re.match(r'^[a-z][a-z0-9]*$', s)
def nnf(f):
    """Return the expression f in negation normal form, i.e. without
    implications and with negations applied only directly to variables.
    Argument:
    f -- logical expression
"""
return f.simplify()
def cnf(f):
    """Return the expression f in conjunctive normal form, i.e. as a
    conjunction of one or more disjunctions of variables and their negations.
    Argument:
    f -- logical expression
"""
return f.flatten().cnf()
def dnf(f):
    """Return the expression f in disjunctive normal form, i.e. as a
    disjunction of one or more conjunctions of variables and their negations.
    Argument:
    f -- logical expression
"""
return f.flatten().dnf()
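# Usage sketch for the normal-form helpers above (illustrative):
#   cnf(Or('p', And('q', 'r')))  gives the formula  (p \/ q) /\ (p \/ r)
#   dnf(And('p', Or('q', 'r')))  gives the formula  (p /\ q) \/ (p /\ r)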
def getValues(d, root=None, p=None):
    """Return the assignment of values to the variables.
    If any of the variables has no value, return None. Otherwise return the
    assignments in the form of a dictionary.
    Arguments:
    d -- dictionary of subexpressions
    root -- root of the graph
    p -- initial assumption, default None (permanent value)
"""
if root != None:
if not root.getSure(p):
return root
val = {k.p: v.getValue(p) for (k, v) in d.items() if isinstance(k, Literal)}
if root == None and None in val.values():
return None
else:
return {k: v for (k, v) in val.items() if v != None}
def sat(f, d=None, root=False, trace=False):
    """Try to determine the satisfiability of the logical formula f with a
    linear algorithm.
    If it finds that the formula is unsatisfiable, it returns False.
    If it finds an assignment of values to the variables under which the
    formula is satisfied, it returns the assignment as a dictionary.
    If it cannot decide whether the formula is satisfiable, it returns None.
    Arguments:
    f -- logical expression
    d -- dictionary of subexpressions, default None (create a new dictionary)
    root -- whether to return the root of the graph when undecided
    trace -- whether to print a trace of the proof, default False
"""
if not type(d) == dict:
d = {}
n = f.simplify().ncf().node(d)
if not n.valuate(True, (None, 0), None, trace):
return False
out = getValues(d, n)
if not root and type(out) != dict:
return None
else:
return out
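# Usage sketch (illustrative):
#   sat(And('p', 'q'))       ->  e.g. {'p': True, 'q': True}
#   sat(And('p', Not('p')))  ->  False (unsatisfiable)
#   sat returns None when the linear algorithm alone cannot decide the formula.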
def sat3(f, d=None, root=False, trace=False):
    """Try to determine the satisfiability of the logical formula f with a
    cubic algorithm.
    If it finds that the formula is unsatisfiable, it returns False.
    If it finds an assignment of values to the variables under which the
    formula is satisfied, it returns the assignment as a dictionary.
    If it cannot decide whether the formula is satisfiable, it returns None.
    Arguments:
    f -- logical expression
    d -- dictionary of subexpressions, default None (create a new dictionary)
    root -- whether to return the root of the graph when undecided
    trace -- whether to print a trace of the proof, default False
"""
if not type(d) == dict:
d = {}
rt = sat(f, d, True, trace)
if rt == False or type(rt) == dict:
return rt
next = sum([[(n, k) for k in range(n.numVariants()) if n.v[k] == None] for n in d.values()], [])
lt = len(next)
ln = lt+1
while lt < ln:
todo = next
next = []
for n, k in todo:
if n.v[k] != None:
continue
if trace > 1:
print("Trying to assign temporary values to %d:%s" % (k, n))
if n.valuate(True, (None, k), (True, k), trace):
s = getValues(d, rt, True)
if type(s) == dict:
return s
if n.valuate(False, (None, k), (False, k), trace):
s = getValues(d, rt, False)
if type(s) == dict:
return s
for nn in d.values():
nn.clearTemp()
else:
for nn in d.values():
for i in range(nn.numVariants()):
if nn.vt[i] != None:
nn.setValue(nn.vt[i], nn.ct[i], (None, i))
nn.clearTemp()
else:
for nn in d.values():
nn.clearTemp()
if n.valuate(False, (None, k), (None, k), trace):
s = getValues(d, rt)
if type(s) == dict:
return s
else:
return False
if n.v[k] != None:
next.append((n, k))
ln = lt
lt = len(next)
if root:
return rt
else:
        return False
def dpllStep(l, trace=False):
    """One step of the DPLL method.
    Arguments:
    l -- list of disjuncts (clauses)
    trace -- whether to print a trace of the proof, default False
"""
num = 1
out = []
while num > 0:
while num > 0:
literals = {}
next = []
for x in l:
if isinstance(x, Literal):
if x.p in literals and not literals[x.p]:
if trace:
print("Contradiction for literal %s" % x.p)
return False
else:
literals[x.p] = True
elif isinstance(x, Not):
if x.t.p in literals and literals[x.t.p]:
if trace:
                            print("Contradiction for literal %s" % x.t.p)
return False
else:
literals[x.t.p] = False
elif len(x.l) == 0:
if trace:
print("Empty disjunction found")
return False
elif not any([Not(y) in x.l for y in x.l if isinstance(y, Literal)]):
next.append(x)
num = len(literals)
out += literals.items()
l = [y for y in [x.apply(literals) for x in next] if not isinstance(y, And)]
if trace > 1:
print("Found %d literals: %s, simplified to %s" % (num, literals, l))
pure = {}
for d in l:
for x in d.l:
if isinstance(x, Literal):
pure[x.p] = None if (x.p in pure and pure[x.p] != True) else True
else:
pure[x.t.p] = None if (x.t.p in pure and pure[x.t.p] != False) else False
purs = [(k, v) for (k, v) in pure.items() if v != None]
num = len(purs)
out += purs
l = [y for y in [x.apply(dict(purs)) for x in l] if not isinstance(y, And)]
if trace > 1:
print("Found %d pures: %s, simplified to %s" % (num, purs, l))
if len(l) == 0:
return dict(out)
p = [k for (k, v) in pure.items() if v == None][0]
if trace:
print("Trying %s:T" % p)
true = dpllStep([y for y in [x.apply({p: True}) for x in l] if not isinstance(y, And)], trace)
if type(true) == dict:
        return dict(out + [(p, True)] + list(true.items()))
if trace:
print("Failed %s:T" % p)
print("Trying %s:F" % p)
false = dpllStep([y for y in [x.apply({p: False}) for x in l] if not isinstance(y, And)], trace)
if type(false) == dict:
        return dict(out + [(p, False)] + list(false.items()))
if trace:
print("Failed %s:F" % p)
return False
def dpll(f, trace=False):
    """Main routine of the DPLL method.
    Arguments:
    f -- logical expression
    trace -- whether to print a trace of the proof, default False
"""
f = cnf(f)
if isinstance(f, And):
l = f.l
else:
l = [f]
return dpllStep(l, trace)
def test():
izrazi=[And('p','q'),Or('p','q'),Or('p',And('q','p')),And(Or(Not('p'),'q'),'p'),And(Or('p','q'),Or('p','r')),And(And('p','q'),And('q','r'),And('r','p')),And(Or('p','q'),Or('q','r'),Or('r','p'),Not(And('p','q')),Not(And('q','r')),Not(And('r','p')))]
for i in izrazi:
print(i)
print(dpll(i))
def abbrev(p, s=None):
    """Return an abbreviated description of the valuation state.
    Arguments:
    p -- object to abbreviate
    s -- sureness flag, default None
"""
if type(p) == tuple:
return '(%s,%d)' % (abbrev(p[0]), p[1])
elif type(p) == list:
return '[%s]' % ''.join([abbrev(x, s[i]) for i, x in enumerate(p)])
elif p == True:
return 'T' if s else 't'
elif p == False:
return 'F' if s else 'f'
else:
return 'N' if s else 'n'
class DAGNode:
    """Abstract class for a node in a directed acyclic graph (DAG).
    Methods:
    __init__ -- constructor
    __repr__ -- string representation
    init -- initialisation
    getValue -- return the relevant current value
    setValue -- set the relevant current value
    getSure -- whether the children's values guarantee the current value
    setSure -- record a guarantee about the current value
    clearTemp -- clear the temporary marks
    numVariants -- number of subexpression variants that need to be checked
    valuate -- valuation to a given logical value
    parents -- update the state of the parents
    update -- update after a change in the state of one of the children
    Variables:
    a -- list of ancestors (parent nodes)
    v -- currently known values of the expression
    vt -- temporary values under the assumption that the initial node holds
    vf -- temporary values under the assumption that the initial node does not hold
    c -- nodes from which the values of the expression came
    ct -- nodes from which the values came, assuming the initial node holds
    cf -- nodes from which the values came, assuming the initial node does not hold
    s -- whether the children's values guarantee the currently known values
    st -- whether the children's values guarantee the currently known temporary
          values, assuming the initial node holds
    sf -- whether the children's values guarantee the currently known temporary
          values, assuming the initial node does not hold
"""
def __init__(self):
"""Konstruktor. Na abstraktnem razredu ga ne smemo klicati."""
raise Exception('Instantiating an abstract class.')
def __repr__(self):
"""Znakovna predstavitev."""
return '%s(%s,%s)' % tuple([abbrev(x, y) for (x, y) in [(self.v, self.s), (self.vt, self.st), (self.vf, self.sf)]])
def init(self):
"""Inicializacija vozlišča."""
self.a = []
self.v = [None]*self.numVariants()
self.vt = [None]*self.numVariants()
self.vf = [None]*self.numVariants()
self.c = [None]*self.numVariants()
self.ct = [None]*self.numVariants()
self.cf = [None]*self.numVariants()
self.s = [False]*self.numVariants()
self.st = [False]*self.numVariants()
self.sf = [False]*self.numVariants()
def getValue(self, p=None):
"""Vrne trajno ali začasno vrednost izraza.
Argument:
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
return self.v[k]
elif p:
return self.vt[k]
else:
return self.vf[k]
def setValue(self, b, c=None, p=None):
"""Nastavi trajno ali začasno vrednost izraza. Če sta začasni
vrednosti enaki, nastavi tudi trajno vrednost.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto None
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
self.v[k] = b
self.vt[k] = b
self.vf[k] = b
self.c[k] = c
elif p:
self.vt[k] = b
self.ct[k] = c
if self.vf[k] == b:
self.v[k] = b
self.c[k] = (c, self.cf[k])
else:
self.vf[k] = b
self.cf[k] = c
if self.vt[k] == b:
self.v[k] = b
self.c[k] = (self.ct[k], c)
def getSure(self, p=None):
"""Pove, ali vrednosti otrok zagotavljajo trenutno vrednost.
Argument:
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
return self.s[k]
elif p:
return self.st[k]
else:
return self.sf[k]
def setSure(self, p=None, trace=False):
"""Nastavi zagotovilo o trenutni vrednosti. Če obstajata zagotovili
o začasni vrednosti, nastavi zagotovilo o trajni vrednosti.
Vrne True, če je zagotovilo novo, in False, če je že obstajalo.
Argumenta:
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
if self.s[k]:
return False
self.s[k] = True
self.st[k] = True
self.sf[k] = True
elif p:
if self.st[k]:
return False
self.st[k] = True
if self.sf[k]:
self.s[k] = True
else:
if self.sf[k]:
return False
self.sf[k] = True
if self.st[k]:
self.s[k] = True
if trace > 3:
print("Ensured at %s the value of the node %s" % (abbrev((p, k)), self))
return True
def clearTemp(self):
"""Pobriše začasne oznake."""
for i in range(self.numVariants()):
if self.v[i] == None:
self.vt[i] = None
self.vf[i] = None
self.ct[i] = None
self.cf[i] = None
self.st[i] = False
self.sf[i] = False
def numVariants(self):
"""Vrne število variant podizrazov, ki jih je treba preveriti.
Generična metoda, vrne 1."""
return 1
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Metodo kličejo nadomestne metode v dedujočih razredih. Če je vrednost
že določena, pove, ali podana vrednost ustreza določeni. V nasprotnem
primeru nastavi podano vrednost in vrne None. Tedaj sledi nadaljnja
obdelava v klicoči metodi.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
v = self.getValue(p)
if v != None:
if trace:
if v != b:
print("Error valuating to %s:%s the node %s from %s" % (abbrev(p), abbrev(b), self, c))
elif trace > 4:
print("Skipping valuation to %s:%s of the node %s" % (abbrev(p), abbrev(b), self))
return v == b
if trace > 2:
print("Valuating to %s:%s the node %s" % (abbrev(p), abbrev(b), self))
self.setValue(b, c, p)
return None
def parents(self, b, p=None, trace=False):
"""Posodobi starše po uspešni valuaciji v logično vrednost b.
Vrne True, če so vse posodobitve uspele, in False sicer.
Argumenti:
b -- nastavljena vrednost
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p, k = p
else:
k = 0
for x in self.a:
if type(x) == tuple:
x, t = x
else:
t = 0
if not x.update(b, (self, k), (p, t), trace):
return False
return True
def update(self, b, c=None, p=None, trace=False):
"""Posodobi stanje po valuaciji enega od otrok v logično vrednost b.
Generična metoda, ne spreminja stanja in vrne True.
Argumenti:
b -- nastavljena vrednost otroka
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
return True
class DAGLiteral(DAGNode):
    """DAG node class representing a logical variable.
    Inherits from DAGNode.
    Non-inherited variable:
    p -- variable name
"""
def __init__(self, d, p):
"""Konstruktor. Nastavi ime spremenljivke.
Argumenta:
d -- slovar podizrazov
p -- ime spremenljivke
"""
self.p = p
self.init()
def __repr__(self):
"""Znakovna predstavitev."""
return '%s: %s' % (DAGNode.__repr__(self), self.p)
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Valuacija uspe, če vrednost b ne nasprotuje že znani vrednosti.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p = p[0]
self.setSure(p, trace)
return DAGNode.valuate(self, b, c, p, trace) != False and self.parents(b, p, trace)
class DAGNot(DAGNode):
    """DAG node class representing logical negation.
    Inherits from DAGNode.
    Non-inherited variable:
    t -- node corresponding to the negated expression
"""
def __init__(self, d, t):
"""Konstruktor. Za negirani izraz poišče ali ustvari vozlišče
ter se vanj doda kot starš.
Argumenta:
d -- slovar podizrazov
t -- negirani izraz
"""
self.t = t.node(d)
self.t.a.append(self)
self.init()
def __repr__(self):
"""Znakovna predstavitev."""
r = str(self.t)
if len(r) > 100:
r = '...'
return "%s: ~(%s)" % (DAGNode.__repr__(self), r)
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Valuacija uspe, če vrednost b ne nasprotuje že znani vrednosti in se
negirani izraz uspešno valuira v nasprotno vrednost.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
val = DAGNode.valuate(self, b, c, p, trace)
if val == None:
if type(p) == tuple:
p = p[0]
return self.t.valuate(not b, (self, 0), p, trace) and self.parents(b, p, trace)
else:
return val
def update(self, b, c=None, p=None, trace=False):
"""Posodobi stanje po valuaciji otroka v logično vrednost b.
Uspe, če uspe valuacija v nasprotno vrednost od b.
Argumenti:
b -- nastavljena vrednost otroka
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p = p[0]
sure = self.t.getSure(p) and self.setSure(p, trace)
if b != None:
b = not b
val = DAGNode.valuate(self, b, c, p, trace)
if val == False:
return False
elif val:
b = None
return (b == None and not sure) or self.parents(b, p, trace)
class DAGAnd(DAGNode):
    """DAG node class representing logical conjunction.
    Inherits from DAGNode.
    Non-inherited variable:
    l -- list of nodes corresponding to the conjuncts
"""
def __init__(self, d, l):
"""Konstruktor. Za vsak konjunkt poišče ali ustvari vozlišče
ter se doda kot starš dobljenemu vozlišču.
Argumenta:
d -- slovar podizrazov
l -- seznam konjuktov
"""
self.l = [x.node(d) for x in l]
for i, x in enumerate(self.l):
x.a.append((self, i))
self.init()
def __repr__(self):
"""Znakovna predstavitev."""
r = ') /\\ ('.join([str(x) for x in self.l])
if len(r) > 100:
r = '%d conjuncts' % len(self.l)
return '%s: (%s)' % (DAGNode.__repr__(self), r)
def getValue(self, p=None):
"""Vrne trajno ali začasno vrednost izraza.
Če hočemo vrednost zadnjega podizraza (dolžine 1), vrnemo vrednost zadnjega konjunkta.
Argument:
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple and p[1] == self.numVariants():
return self.l[-1].getValue(p[0])
else:
return DAGNode.getValue(self, p)
def numVariants(self):
"""Vrne število variant podizrazov, ki jih je treba preveriti.
Vrne 1 ali število konjunktov minus 1."""
return max(1, len(self.l)-1)
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Valuacija uspe, če vrednost b ne nasprotuje že znani vrednosti. Če je
b resničen, se morajo še vsi konjunkti valuirati v True. V nasprotnem
primeru preveri, ali je trenutna vrednost vsaj enega konjunkta različna
od True. Če edini tak konjunkt še nima vrednosti, ga valuira v False.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
val = DAGNode.valuate(self, b, c, p, trace)
if val == None:
if type(p) == tuple:
p, k = p
else:
k = 0
if len(self.l) == 0:
if not b:
return False
self.setSure(p, trace)
elif len(self.l) == 1:
if not self.l[0].valuate(b, (self, k), p, trace):
return False
else:
i = k
if b:
while i < len(self.l)-1:
val = DAGNode.valuate(self, True, (self, k), (p, i+1), trace) if i < len(self.l)-2 else self.l[-1].valuate(True, (self, k), p, trace)
if val == False or not self.l[i].valuate(True, (self, k), p, trace):
return False
elif val:
break
i += 1
else:
while i < len(self.l)-1:
if self.l[i].getValue(p):
val = DAGNode.valuate(self, False, (self, k), (p, i+1), trace) if i < len(self.l)-2 else self.l[-1].valuate(False, (self, k), p, trace)
if val == False:
return False
if val:
break
else:
if (self.getValue((p, i+1)) if i < len(self.l)-2 else self.l[-1].getValue(p)) and not self.l[i].valuate(False, (self, k), p, trace):
return False
break
i += 1
if k > 0:
return self.update(b, (self, k), (p, k-1), trace)
else:
return self.parents(b, p, trace)
else:
return val
def update(self, b, c=None, p=None, trace=False):
"""Posodobi stanje po valuaciji enega od otrok v logično vrednost b.
Če je b neresničen, se poskusi valuirati v False. Če je v nasprotnem
primeru trenutna vrednost True, preveri, ali je trenutna vrednost vsaj
enega konjunkta različna od True. Če edini tak konjunkt še nima
vrednosti, ga valuira v False.
Argumenti:
b -- nastavljena vrednost otroka
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if len(self.l) <= 1:
sure = True
else:
if b:
if k == len(self.l)-1:
k -= 1
if self.getValue((p, k)) == False:
if not self.l[k].valuate(False, c, p, trace):
return False
else:
b = None
elif not self.l[k].getValue(p):
b = None
elif (c[0] if type(c) == tuple else c) != self:
if self.getValue((p, k)) == False:
if not (self.valuate(False, c, (p, k+1), trace) if k < len(self.l)-2 else self.l[-1].valuate(False, c, p, trace)):
return False
else:
b = None
elif not (self.l[-1].getValue(p) if k == len(self.l)-2 else self.getValue((p, k+1))):
b = None
else:
if self.getValue((p, k)) == False:
if not self.l[k].valuate(False, c, p, trace):
return False
else:
b = None
elif not self.l[k].getValue(p):
b = None
sure = (self.l[-1].getSure(p) if k == len(self.l)-2 else self.getSure((p, k+1))) and self.l[k].getSure(p) and self.setSure((p, k), trace)
while b != None:
val = DAGNode.valuate(self, True, c, (p, k), trace)
if val == False:
return False
elif val:
b = None
k -= 1
if k < 0:
break
if self.getValue((p, k)) == False:
if not self.l[k].valuate(False, c, p, trace):
return False
else:
b = None
elif not self.l[k].getValue(p):
b = None
sure = sure and self.l[k].getSure(p) and self.setSure((p, k), trace)
else:
if k == len(self.l)-1:
k -= 1
sure = (self.l[-1].getValue(p) == False and self.l[-1].getSure(p)) if k == len(self.l)-2 else (self.getValue((p, k+1)) == False and self.getSure((p, k+1)))
sure = (sure or (self.l[k].getValue(p) == False and self.l[k].getSure(p))) and self.setSure((p, k), trace)
while b != None:
val = DAGNode.valuate(self, False, c, (p, k), trace)
if val == False:
return False
elif val:
b = None
k -= 1
if k < 0:
break
sure = (sure or (self.l[k].getValue(p) == False and self.l[k].getSure(p))) and self.setSure((p, k), trace)
while sure and k > 0:
k -= 1
sure = self.l[k].getSure(p)
if self.getValue((p, k)) == False:
sure = sure or (self.l[-1].getValue(p) if k == len(self.l)-2 else self.getValue((p, k+1))) == False
sure = sure and self.setSure((p, k), trace)
return (b == None and not sure) or self.parents(b, p, trace)
class LogicalFormula:
    """Abstract class of logical formulas.
    Methods:
    __init__ -- constructor
    __hash__ -- hash value
    __repr__ -- string representation
    __eq__ -- the "is equal" relation
    __ne__ -- the "is not equal" relation
    __lt__ -- the "is less than" relation
    __le__ -- the "is less than or equal" relation
    __gt__ -- the "is greater than" relation
    __ge__ -- the "is greater than or equal" relation
    flatten -- flatten the expression
    simplify -- simplify the expression
    cnf -- convert to conjunctive normal form
    dnf -- convert to disjunctive normal form
    ncf -- convert to a form with only negations and conjunctions
    apply -- return the expression given the supplied variable values
    node -- return the DAG node corresponding to the expression
"""
def __init__(self):
"""Konstruktor. Na abstraktnem razredu ga ne smemo klicati."""
raise Exception('Instantiating an abstract class.')
def __hash__(self):
"""Zgostitev. Vrne zgostitev znakovne predstavitve."""
return self.__repr__().__hash__()
def __repr__(self, level=0):
"""Znakovna predstavitev.
Argument:
level -- nivo za postavljanje oklepajev, privzeto 0 (brez oklepajev)
"""
return ""
def __eq__(self, other):
"""Relacija "je enak".
Zaradi dedovanja metode __hash__ je definirana kot negacija relacije
"ni enak".
"""
return not (self != other)
def __ne__(self, other):
"""Relacija "ni enak".
Podrazredi morajo povoziti to metodo.
"""
return True
def __lt__(self, other):
"""Relacija "je manjši".
Podrazredi morajo povoziti to metodo.
"""
return True
def __le__(self, other):
"""Relacija "je manjši ali enak".
Definirana je kot negacija relacije "je večji".
"""
return not (self > other)
def __gt__(self, other):
"""Relacija "je večji".
Definirana je kot presek relacij "je večji ali enak" in "ni enak".
"""
return self >= other and self != other
def __ge__(self, other):
"""Relacija "je večji ali enak".
Definirana je kot negacija relacije "je manjši".
"""
return not (self < other)
def flatten(self):
"""Splošči izraz.
Generična metoda, vrne sebe.
"""
return self
def simplify(self):
"""Poenostavi izraz.
Generična metoda, vrne sebe.
"""
return self
def cnf(self):
"""Pretvori v konjunktivno normalno obliko.
Generična metoda, vrne sebe.
"""
return self
def dnf(self):
"""Pretvori v disjunktivno normalno obliko.
Generična metoda, vrne sebe.
"""
return self
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Generična metoda, vrne sebe.
"""
return self
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Generična metoda, vrne sebe.
Argument:
d -- slovar vrednosti spremenljivk
"""
return self
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Generična metoda, javi napako.
Argument:
d -- slovar vozlišč za izraze
"""
raise Exception('Not applicable in DAG.')
class Literal(LogicalFormula):
    """A logical variable.
    Inherits from LogicalFormula.
    Variable:
    p -- variable name
"""
def __init__(self, p):
"""Konstruktor. Nastavi se ime spremenljivke, ki mora biti niz malih
črk.
Argument:
p -- ime spremenljivke
"""
if not isLiteral(p):
raise Exception('Literals must be strings of lowercase letters!')
self.p = p
def __repr__(self, level=0):
"""Znakovna predstavitev. Ta je enaka imenu spremenljivke."""
return paren(self.p, level, 6)
def __ne__(self, other):
"""Relacija "ni enak".
Spremenljivke se razlikujejo po svojem imenu.
"""
return not isinstance(other, Literal) or self.p != other.p
def __lt__(self, other):
"""Relacija "je manjši".
Spremenljivke se razvrščajo po svojem imenu in so manjše od ostalih
logičnih izrazov.
"""
if isinstance(other, Literal):
return self.p < other.p
else:
return isinstance(other, LogicalFormula)
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Nadomesti spremenljivko z vrednostjo iz slovarja, če ta obstaja.
Argument:
d -- slovar vrednosti spremenljivk
"""
if self.p in d:
if isLiteral(d[self.p]):
return Literal(d[self.p])
elif isinstance(d[self.p], bool):
return Tru() if d[self.p] else Fls()
elif isinstance(d[self.p], LogicalFormula):
return d[self.p].flatten()
return self
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Če izraza še ni v slovarju d, naredi novo vozlišče in ga doda v slovar.
Argument:
d -- slovar vozlišč za izraze
"""
if self not in d:
n = DAGLiteral(d, self.p)
d[self] = n
return d[self]
class Not(LogicalFormula):
    """Logical negation.
    Inherits from LogicalFormula.
    Variable:
    t -- the negated expression
"""
def __init__(self, t):
"""Konstruktor. Nastavi se negirani izraz.
Če je t veljaven niz, se negira spremenljivka s tem imenom.
Argument:
t -- negirani izraz
"""
if isLiteral(t):
t = Literal(t)
elif not isinstance(t, LogicalFormula):
raise Exception('Only logical formulas can be negated!')
self.t = t
def __repr__(self, level=0):
"""Znakovna predstavitev. Negacija se označi z znakom ~."""
return paren('~'+self.t.__repr__(6), level, 6)
def __ne__(self, other):
"""Relacija "ni enak".
Negacije se ločijo po negiranem izrazu.
"""
return not isinstance(other, Not) or self.t != other.t
def __lt__(self, other):
"""Relacija "je manjši".
Negacije se razvrščajo po negiranem izrazu in so manjše od ostalih
logičnih izrazov, razen spremenljivk.
"""
if isinstance(other, Not):
return self.t < other.t
else:
return isinstance(other, LogicalFormula) and not isinstance(other, Literal)
def flatten(self):
"""Splošči izraz.
Izniči dvojne negacije in splošči podizraze."""
if isinstance(self.t, Not):
return self.t.t.flatten()
elif isinstance(self.t, And):
return Or([Not(x) for x in self.t.l]).flatten()
elif isinstance(self.t, Or):
return And([Not(x) for x in self.t.l]).flatten()
else:
return self
def simplify(self):
"""Poenostavi izraz.
Izniči dvojno negacijo ter porine negacijo v konjunkcijo ali
disjunkcijo po de Morganovih zakonih.
"""
if isinstance(self.t, Not):
return self.t.t.simplify()
elif isinstance(self.t, And):
return Or([Not(x) for x in self.t.l]).simplify()
elif isinstance(self.t, Or):
return And([Not(x) for x in self.t.l]).simplify()
else:
return self
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Izniči dvojno negacijo ter porine negacijo v disjunkcijo po
de Morganovih zakonih.
"""
if isinstance(self.t, Not):
return self.t.t.ncf()
elif isinstance(self.t, Or):
return And([Not(x).ncf() for x in self.t.l])
else:
return Not(self.t.ncf())
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Aplikacijo naredi na negiranem izrazu, nato pa izvede poenostavitev.
Argument:
d -- slovar vrednosti spremenljivk
"""
return Not(self.t.apply(d)).flatten()
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Če izraza še ni v slovarju d, naredi novo vozlišče in ga doda v slovar.
Argument:
d -- slovar vozlišč za izraze
"""
if self not in d:
n = DAGNot(d, self.t)
d[self] = n
return d[self]
class And(LogicalFormula):
    """Logical conjunction.
    Inherits from LogicalFormula.
    Variable:
    l -- list of conjuncts
"""
def __init__(self, *l):
"""Konstruktor. Nastavijo se konjunkti.
Konjunkti so lahko podani kot argumenti, kot seznam ali kot
logična konjunkcija. Če je kateri od konjunktov veljaven niz, se
uporabi spremenljivka s tem imenom.
Argumenti:
*l -- konjunkti
"""
self.l = None
if len(l) == 1:
            if isinstance(l[0], And):
self.l = l[0].l
elif isLiteral(l[0]):
self.l = [Literal(l[0])]
elif isinstance(l[0], list) or isinstance(l[0], tuple):
l = list(l[0])
if self.l == None:
l = [Literal(x) if isLiteral(x) else x for x in l]
if any([not isinstance(x, LogicalFormula) for x in l]):
raise Exception('Only logical formulas can be conjoined!')
self.l = l[:]
def __repr__(self, level=0):
"""Znakovna predstavitev. Konjunkti so ločeni z znakoma /\. Prazna
konjunkcija je logična resnica in se označi z znakom T."""
if len(self.l) == 0:
return paren('T', level, 6)
elif len(self.l) == 1:
return self.l[0].__repr__(level)
else:
return paren(' /\\ '.join([x.__repr__(6) for x in self.l]), level, 5)
def __ne__(self, other):
"""Relacija "ni enak".
Konjukcije se ločijo po seznamu konjunktov.
"""
return not isinstance(other, And) or self.l != other.l
def __lt__(self, other):
"""Relacija "je manjši".
Konjukcije se razvrščajo po seznamu konjunktov in so manjše od
disjunkcij.
"""
if isinstance(other, And):
return self.l < other.l
else:
return isinstance(other, LogicalFormula) and not isinstance(other, Literal) and not isinstance(other, Not)
def flatten(self):
"""Splošči izraz."""
if len(self.l) == 1:
return self.l[0].flatten()
else:
l = sum([y.l if isinstance(y, And) else [y] for y in [x.flatten() for x in self.l]], [])
if any([isinstance(x, Or) and len(x.l) == 0 for x in l]):
return Fls()
elif len(l) == 1:
return l[0]
else:
return And(l)
def simplify(self):
"""Poenostavi izraz.
Najprej splošči gnezdene konjunkcije med poenostavljenimi konjunkti.
Če je konjunkt natanko eden, ga vrne, sicer pa poenostavi disjunkcije
med konjunkti po pravilih absorpcije. Če je po teh poenostavitvah
kateri od konjunktov prazna disjunkcija (tj. logična neresnica) ali se
kateri od konjunktov pojavi še v negirani obliki, potem vrne logično
neresnico. V nasprotnem primeru se konjunkti uredijo po določenem
vrstnem redu.
"""
l = sum([y.l if isinstance(y, And) else [y] for y in [x.simplify() for x in self.l]], [])
if len(l) == 1:
return l[0]
else:
l = set(l)
l.difference_update([x for x in l if isinstance(x, Or) and any([y in x.l for y in l])])
assorb = [(x, [y.t for y in l if isinstance(y, Not) and y.t in x.l] + [Not(y) for y in l if Not(y) in x.l]) for x in l if isinstance(x, Or)]
remove = [x[0] for x in assorb if len(x[1]) > 0]
add = [Or([y for y in x[0].l if y not in x[1]]).simplify() for x in assorb if len(x[1]) > 0]
l.difference_update(remove)
l.update(add)
if len(l) == 1:
return l.pop()
if any([isinstance(x, Or) and len(x.l) == 0 for x in l]) or any([x.t in l for x in l if isinstance(x, Not)]):
return Fls()
return And(sortSet(l))
def cnf(self):
"""Pretvori v konjunktivno normalno obliko.
Vse konjunkte pretvori v konjunktivno normalno obliko.
"""
return And([x.cnf() for x in self.l]).flatten()
def dnf(self):
"""Pretvori v disjunktivno normalno obliko.
Če je število konjunktov 0 ali 1, vrne sebe oziroma edinega konjunkta v
disjunktivni normalni obliki. Sicer pretvori vse konjunkte v
disjunktivno normalno obliko, nato pa po pravilih za distributivnost
naredi disjunkcijo več konjunktov.
"""
if len(self.l) == 0:
return self
elif len(self.l) == 1:
return self.l[0].dnf()
l = [x.dnf() for x in self.flatten().l]
a = [x for x in l if not isinstance(x, Or)]
d = [x for x in l if isinstance(x, Or)]
if len(d) == 0:
return And(a)
else:
return Or([And(a + [x] + d[1:]).dnf() for x in d[0].l]).flatten()
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Vse konjunkte pretvori v obliko z negacijami in konjunkcijami.
"""
return And([x.ncf() for x in self.l])
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Aplikacijo naredi na vsakem konjunktu, nato pa izvede poenostavitev.
Argument:
d -- slovar vrednosti spremenljivk
"""
return And([x.apply(d) for x in self.l]).flatten()
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Če izraza še ni v slovarju d, naredi novo vozlišče in ga doda v slovar.
Argument:
d -- slovar vozlišč za izraze
"""
if self not in d:
n = DAGAnd(d, self.l)
d[self] = n
return d[self]
class Or(LogicalFormula):
    """Logical disjunction.
    Inherits from LogicalFormula.
    Variable:
    l -- list of disjuncts
"""
def __init__(self, *l):
"""Konstruktor. Nastavijo se disjunkti.
Disjunkti so lahko podani kot argumenti, kot seznam ali kot
logična disjunkcija. Če je kateri od disjunktov veljaven niz, se
uporabi spremenljivka s tem imenom.
Argumenti:
*l -- disjunkti
"""
self.l = None
if len(l) == 1:
if isinstance(l[0], Or):
self.l = l[0].l
elif isLiteral(l[0]):
self.l = [Literal(l[0])]
elif isinstance(l[0], list) or isinstance(l[0], tuple):
l = list(l[0])
if self.l == None:
l = [Literal(x) if isLiteral(x) else x for x in l]
if any([not isinstance(x, LogicalFormula) for x in l]):
raise Exception('Only logical formulas can be disjoined!')
self.l = l[:]
def __repr__(self, level=0):
"""Znakovna predstavitev. Disjunkti so ločeni z znakoma \/. Prazna
disjunkcija je logična neresnica in se označi z znakom F."""
if len(self.l) == 0:
return paren('F', level, 6)
elif len(self.l) == 1:
return self.l[0].__repr__(level)
else:
return paren(' \\/ '.join([x.__repr__(5) for x in self.l]), level, 4)
def __ne__(self, other):
"""Relacija "ni enak".
Disjukcije se ločijo po seznamu disjunktov.
"""
return not isinstance(other, Or) or self.l != other.l
def __lt__(self, other):
"""Relacija "je manjši".
Disjukcije se razvrščajo po seznamu konjunktov in so večje od ostalih
logičnih izrazov.
"""
return isinstance(other, Or) and self.l < other.l
def flatten(self):
"""Splošči izraz."""
if len(self.l) == 1:
return self.l[0].flatten()
else:
l = sum([y.l if isinstance(y, Or) else [y] for y in [x.flatten() for x in self.l]], [])
if any([isinstance(x, And) and len(x.l) == 0 for x in l]):
return Tru()
elif len(l) == 1:
return l[0]
else:
return Or(l)
def simplify(self):
"""Poenostavi izraz.
Najprej splošči gnezdene disjunkcije med poenostavljenimi disjunkti.
Če je disjunkt natanko eden, ga vrne, sicer pa poenostavi konjunkcije
med disjunkti po pravilih absorpcije. Če je po teh poenostavitvah
kateri od disjunktov prazna konjunkcija (tj. logična resnica) ali se
kateri od disjunktov pojavi še v negirani obliki, potem vrne logično
resnico. V nasprotnem primeru se disjunkti uredijo po določenem
vrstnem redu.
"""
l = sum([y.l if isinstance(y, Or) else [y] for y in [x.simplify() for x in self.l]], [])
if len(l) == 1:
return l[0]
else:
l = set(l)
l.difference_update([x for x in l if isinstance(x, And) and any([y in x.l for y in l])])
assorb = [(x, [y.t for y in l if isinstance(y, Not) and y.t in x.l] + [Not(y) for y in l if Not(y) in x.l]) for x in l if isinstance(x, And)]
remove = [x[0] for x in assorb if len(x[1]) > 0]
add = [And([y for y in x[0].l if y not in x[1]]).simplify() for x in assorb if len(x[1]) > 0]
l.difference_update(remove)
l.update(add)
if len(l) == 1:
return l.pop()
if any([isinstance(x, And) and len(x.l) == 0 for x in l]) or any([x.t in l for x in l if isinstance(x, Not)]):
return Tru()
else:
return Or(sortSet(l))
def cnf(self):
"""Pretvori v konjunktivno normalno obliko.
Če je število disjunktov 0 ali 1, vrne sebe oziroma edinega disjunkta v
konjunktivni normalni obliki. Sicer pretvori vse disjunkte v
konjunktivno normalno obliko, nato pa po pravilih za distributivnost
naredi konjunkcijo več disjunktov.
"""
if len(self.l) == 0:
return self
elif len(self.l) == 1:
return self.l[0].cnf()
l = [x.cnf() for x in self.flatten().l]
a = [x for x in l if not isinstance(x, And)]
d = [x for x in l if isinstance(x, And)]
if len(d) == 0:
return Or(a)
else:
return And([Or(a + [x] + d[1:]).cnf() for x in d[0].l]).flatten()
def dnf(self):
"""Pretvori v disjunktivno normalno obliko.
Vse disjunkte pretvori v disjunktivno normalno obliko.
"""
return Or([x.dnf() for x in self.l]).flatten()
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Negacije vseh disjunktov pretvori v obliko z negacijami in
konjunkcijami ter vrne njihovo negirano konjunkcijo.
"""
return Not(And([Not(x).ncf() for x in self.l]))
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Aplikacijo naredi na vsakem disjunktu, nato pa izvede poenostavitev.
Argument:
d -- slovar vrednosti spremenljivk
"""
return Or([x.apply(d) for x in self.l]).flatten()
class Implies(Or):
    """Logical implication, represented as the disjunction of the consequent
    with the negation of the antecedent.
    Inherits from Or.
"""
    def __init__(self, prec, cons):
        """Constructor. Sets the two disjuncts.
        Arguments:
        prec -- antecedent
        cons -- consequent
"""
if isLiteral(prec):
prec = Literal(prec)
if isLiteral(cons):
cons = Literal(cons)
if not isinstance(prec, LogicalFormula) or not isinstance(cons, LogicalFormula):
raise Exception('Only logical formulas can be imply or be implied!')
self.l = [Not(prec), cons]
def __repr__(self, level=0):
"""Znakovna predstavitev. Precedens in konsekvens sta ločena z znakoma
=>."""
if len(self.l) == 2 and isinstance(self.l[0], Not):
return paren(self.l[0].t.__repr__(2) + ' => ' + self.l[1].__repr__(1), level, 1)
else:
return Or.__repr__(self, level)
class Tru(And):
    """Logical truth, represented as an empty conjunction.
    Inherits from And.
"""
    def __init__(self):
        """Constructor. Sets an empty list of conjuncts."""
self.l = []
class Fls(Or):
    """Logical falsehood, represented as an empty disjunction.
    Inherits from Or.
"""
    def __init__(self):
        """Constructor. Sets an empty list of disjuncts."""
self.l = []
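# Minimal sketch for running the DPLL solver directly (illustrative only):
if __name__ == '__main__':
    formula = And(Or('p', 'q'), Not(And('p', 'q')))
    # Prints one satisfying assignment, e.g. {'p': True, 'q': False}
    print(dpll(formula))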
|
bsd-3-clause
| 7,319,391,701,429,646,000
| 33.434596
| 252
| 0.555154
| false
| 2.901174
| false
| false
| false
|
warnes/irrigatorpro
|
irrigator_pro/farms/signals.py
|
1
|
11745
|
from django.dispatch import receiver
from django.db.models.signals import *
from farms.models import *
from irrigator_pro.settings import DEBUG
def minNone( *args ):
    args = [x for x in args if x is not None]
if args:
return min(args)
else:
return None
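# Behaviour sketch (illustrative): minNone(None, date(2020, 5, 1)) returns
# date(2020, 5, 1), while minNone(None, None) and minNone() both return None,
# so callers can freely mix unset and already-known dependency dates.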
## From
## http://stackoverflow.com/questions/15624817/have-loaddata-ignore-or-disable-post-save-signals
from functools import wraps
def disable_for_loaddata(signal_handler):
"""
Decorator that turns off signal handlers when loading fixture data.
"""
@wraps(signal_handler)
def wrapper(*args, **kwargs):
if kwargs.get('raw', None):
return
signal_handler(*args, **kwargs)
return wrapper
## These signal handlers records the (earliest) relevant date of any
## created/changed/deleted object upon which calculation of
## WaterRegister entries depend.
@receiver(pre_save, sender=WaterHistory)
@receiver(pre_delete, sender=WaterHistory)
@disable_for_loaddata
def handler_WaterHistory(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = WaterHistory.objects.get(pk=instance.id)
old_field=old_instance.field
new_field=new_instance.field
old_field.earliest_changed_dependency_date = minNone(old_field.earliest_changed_dependency_date,
old_instance.datetime.date()
)
new_field.earliest_changed_dependency_date = minNone(new_field.earliest_changed_dependency_date,
new_instance.datetime.date()
)
old_field.save()
new_field.save()
else:
try:
field = instance.field
if instance.datetime:
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
instance.datetime.date()
)
field.save()
except ValueError:
pass
@receiver(pre_save, sender=ProbeReading)
@receiver(pre_delete, sender=ProbeReading)
@disable_for_loaddata
def handler_ProbeReading(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = ProbeReading.objects.get(pk=instance.id)
old_radio_id = old_instance.radio_id
old_reading_date = old_instance.datetime.date()
old_probes = Probe.objects.filter(radio_id=old_radio_id,
crop_season__season_start_date__lte=old_reading_date,
crop_season__season_end_date__gte=old_reading_date)
for old_probe in old_probes:
field=old_probe.field
new_date = minNone(field.earliest_changed_dependency_date,
old_instance.datetime.date() )
field.earliest_changed_dependency_date = new_date
#if DEBUG: print "Field %s: %s --> %s " % (field, field.earliest_changed_dependency_date, new_date)
field.save()
this_radio_id = instance.radio_id
this_reading_date = instance.datetime.date()
new_probes = Probe.objects.filter(radio_id=this_radio_id,
crop_season__season_start_date__lte=this_reading_date,
crop_season__season_end_date__gte=this_reading_date)
for new_probe in new_probes:
field=new_probe.field
new_date = minNone(field.earliest_changed_dependency_date,
instance.datetime.date() )
field.earliest_changed_dependency_date = new_date
#if DEBUG: print "Field %s: %s --> %s " % (field, field.earliest_changed_dependency_date, new_date)
field.save()
@receiver(pre_save, sender=CropSeasonEvent)
@receiver(pre_delete, sender=CropSeasonEvent)
@disable_for_loaddata
def handler_CropSeasonEvent(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = CropSeasonEvent.objects.get(pk=instance.id)
old_field = old_instance.field
dep_mdate = minNone(old_field.earliest_changed_dependency_date, old_instance.date)
        old_field.earliest_changed_dependency_date = dep_mdate
old_field.save()
field = instance.field
dep_mdate = minNone(field.earliest_changed_dependency_date, instance.date)
field.earliest_changed_dependency_date = dep_mdate
field.save()
@receiver(pre_save, sender=CropSeason)
@receiver(pre_delete, sender=CropSeason)
@disable_for_loaddata
def handler_CropSeason(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = CropSeason.objects.get(pk=instance.id)
old_date = None
new_date = None
if old_instance.season_start_date != new_instance.season_start_date:
old_date = minNone(old_date, old_instance.season_start_date)
new_date = minNone(new_date, new_instance.season_start_date)
if old_instance.season_end_date != new_instance.season_end_date:
old_date = minNone(old_date, old_instance.season_end_date)
new_date = minNone(new_date, new_instance.season_end_date)
if old_instance.crop != new_instance.crop:
old_date = old_instance.season_start_date
new_date = new_instance.season_start_date
if old_date is not None:
for field in old_instance.field_list.all():
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
old_date)
field.save()
if new_date is not None:
for field in new_instance.field_list.all():
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
new_date)
field.save()
removed_fields = set( old_instance.field_list.all() ) - \
set( new_instance.field_list.all() )
added_fields = set( new_instance.field_list.all() ) - \
set( old_instance.field_list.all() )
for field in removed_fields:
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
old_instance.season_start_date)
field.save()
for field in added_fields:
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
new_instance.season_start_date)
field.save()
else:
pass
@receiver(post_save, sender=CropSeason)
@disable_for_loaddata
def handler_CropSeason_postsave(sender, instance, created, **kwargs):
if created == True:
for field in instance.field_list.all():
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
instance.season_start_date)
field.save()
@receiver(pre_save, sender=Probe)
@receiver(pre_delete, sender=Probe)
@disable_for_loaddata
def handler_Probe(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = Probe.objects.get(pk=instance.id)
old_radio_id = old_instance.radio_id
old_season_start_date = old_instance.crop_season.season_start_date
old_season_end_date = old_instance.crop_season.season_end_date
old_probereadings = ProbeReading.objects.filter(radio_id=old_radio_id,
datetime__range=(old_season_start_date,
old_season_end_date)
)
if old_probereadings:
old_earliest_probereading_date = old_probereadings.earliest('datetime').datetime.date();
else:
old_earliest_probereading_date = None;
new_radio_id = new_instance.radio_id
new_season_start_date = new_instance.crop_season.season_start_date
new_season_end_date = new_instance.crop_season.season_end_date
new_probereadings = ProbeReading.objects.filter(radio_id=new_radio_id,
datetime__range=(new_season_start_date,
new_season_end_date)
)
if new_probereadings:
new_earliest_probereading_date = new_probereadings.earliest('datetime').datetime.date();
else:
new_earliest_probereading_date = None;
if old_radio_id != new_radio_id: # changed radioid
if old_instance.id and old_instance.field:
field=old_instance.field
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
old_earliest_probereading_date)
field.save()
if new_instance.id and new_instance.field:
field=new_instance.field
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
new_earliest_probereading_date)
field.save()
old_field = old_instance.field
new_field = new_instance.field
if old_field:
old_field.earliest_changed_dependency_date = minNone(old_field.earliest_changed_dependency_date,
old_earliest_probereading_date)
old_field.save()
if new_field:
new_field.earliest_changed_dependency_date = minNone(new_field.earliest_changed_dependency_date,
new_earliest_probereading_date)
new_field.save()
else: # new object or delete object
radio_id = instance.radio_id
season_start_date = instance.crop_season.season_start_date
season_end_date = instance.crop_season.season_end_date
probereadings = ProbeReading.objects.filter(radio_id=radio_id,
datetime__range=(season_start_date,
season_end_date)
)
if probereadings:
            earliest_probereading_date = probereadings.earliest('datetime').datetime.date()
        else:
            earliest_probereading_date = None
if instance.id and instance.field:
field=instance.field
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
earliest_probereading_date)
field.save()
|
mit
| -8,849,396,207,397,116,000
| 42.66171
| 111
| 0.558791
| false
| 4.28337
| false
| false
| false
|
alex/sentry
|
tests/sentry/web/frontend/tests.py
|
1
|
15481
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import json
from django.conf import settings as django_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from sentry.conf import settings
from sentry.models import Group, Project, TeamMember, \
MEMBER_OWNER, MEMBER_USER, Team
from sentry.web.helpers import get_login_url
from tests.base import TestCase
logger = logging.getLogger(__name__)
class SentryViewsTest(TestCase):
fixtures = ['tests/fixtures/views.json']
def setUp(self):
self.user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
self.user.set_password('admin')
self.user.save()
def test_auth(self):
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/login.html')
resp = self.client.post(reverse('sentry-login'), {
'username': 'admin',
'password': 'admin',
}, follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'sentry/login.html')
def test_dashboard(self):
# no projects redirects them to create new project
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/projects/new.html')
# requires at least one project to show dashboard
Project.objects.create(name='foo', owner=self.user)
Project.objects.create(name='bar', owner=self.user).team
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/dashboard.html')
# no projects and unauthenticated
self.client.logout()
Project.objects.all().delete()
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/login.html')
def test_index(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry', kwargs={'project_id': 1}) + '?sort=freq', follow=False)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
def test_group_details(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group', kwargs={'project_id': 1, 'group_id': 2}), follow=False)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_event_list(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-events', kwargs={'project_id': 1, 'group_id': 2}), follow=False)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_message_details(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-event', kwargs={'project_id': 1, 'group_id': 2, 'event_id': 4}), follow=True)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/event.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_json_multi(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
self.assertEquals(json.loads(resp.content)[0]['level'], 'error')
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}), {'limit': 1})
self.assertEquals(resp.status_code, 200)
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}), {'limit': settings.MAX_JSON_RESULTS+1})
self.assertEquals(resp.status_code, 400)
def test_group_events_details_json(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-event-json', kwargs={'project_id': 1, 'group_id': 2, 'event_id_or_latest': 'latest'}))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
self.assertEquals(json.loads(resp.content)['level'], 'error')
def test_status_env(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/env.html')
def test_status_packages(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-packages-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/packages.html')
def test_status_queue(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-queue-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/queue.html')
def test_stats(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-stats'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/stats.html')
def test_manage_users(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-users'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/users/list.html')
def test_event_list(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-events', kwargs={'project_id': 1}))
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/events/event_list.html')
def test_replay_event(self):
# bad event_id
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-replay', kwargs={'project_id': 1, 'event_id': 1}))
self.assertEquals(resp.status_code, 302)
# valid params
# self.client.login(username='admin', password='admin')
# resp = self.client.get(reverse('sentry-replay', kwargs={'project_id': 1, 'event_id': 4}))
# self.assertEquals(resp.status_code, 200)
# self.assertTemplateUsed(resp, 'sentry/events/replay.html')
class ViewPermissionTest(TestCase):
"""
These tests simply ensure permission requirements for various views.
"""
fixtures = ['tests/fixtures/views.json']
def setUp(self):
self.user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
self.user.set_password('admin')
self.user.save()
self.user2 = User(username="member", email="member@localhost")
self.user2.set_password('member')
self.user2.save()
self.user3 = User(username="nobody", email="nobody@localhost")
self.user3.set_password('nobody')
self.user3.save()
self.user4 = User(username="owner", email="owner@localhost")
self.user4.set_password('owner')
self.user4.save()
self.team = Team.objects.create(owner=self.user4, name='foo')
self.project = Project.objects.get(id=1)
self.project.update(public=False, team=self.team)
self.tm = TeamMember.objects.get_or_create(
user=self.user2,
team=self.team,
type=MEMBER_USER,
)[0]
TeamMember.objects.get_or_create(
user=self.user4,
team=self.team,
type=MEMBER_OWNER,
)[0]
def _assertPerm(self, path, template, account=None, want=True):
"""
Requests ``path`` and asserts that ``template`` is
rendered for ``account`` (Anonymous if None) given ``want``
is Trueish.
"""
if account:
self.assertTrue(self.client.login(username=account, password=account))
else:
self.client.logout()
resp = self.client.get(path)
if want:
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, template)
else:
self.assertEquals(resp.status_code, 302)
self.assertTemplateNotUsed(resp, template)
def test_project_list(self):
path = reverse('sentry-project-list')
template = 'sentry/projects/list.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'nobody')
self._assertPerm(path, template, None, False)
def test_new_project(self):
path = reverse('sentry-new-project')
template = 'sentry/projects/new.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, None, False)
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'nobody')
self._assertPerm(path, template, None, False)
def test_manage_project(self):
path = reverse('sentry-manage-project', kwargs={'project_id': 1})
template = 'sentry/projects/manage.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_remove_project(self):
        # We can't delete the default project
with self.Settings(SENTRY_PROJECT=2):
path = reverse('sentry-remove-project', kwargs={'project_id': 1})
template = 'sentry/projects/remove.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_new_team_member(self):
path = reverse('sentry-new-team-member', kwargs={'team_slug': self.team.slug})
template = 'sentry/teams/members/new.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_edit_team_member(self):
path = reverse('sentry-edit-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
template = 'sentry/teams/members/edit.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_remove_team_member(self):
path = reverse('sentry-remove-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
template = 'sentry/teams/members/remove.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
class SentrySearchTest(TestCase):
def test_checksum_query(self):
checksum = 'a' * 32
g = Group.objects.create(
project_id=1,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
with self.Settings(SENTRY_PUBLIC=True):
response = self.client.get(reverse('sentry-search', kwargs={'project_id': 1}), {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver%s' % (g.get_absolute_url(),))
def test_dupe_checksum(self):
checksum = 'a' * 32
g1 = Group.objects.create(
project_id=1,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
g2 = Group.objects.create(
project_id=1,
logger='root',
culprit='b',
checksum=checksum,
message='hi',
)
with self.Settings(SENTRY_PUBLIC=True, SENTRY_USE_SEARCH=False):
response = self.client.get(reverse('sentry-search', kwargs={'project_id': 1}), {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sentry/search.html')
context = response.context
self.assertTrue('event_list' in context)
self.assertEquals(len(context['event_list']), 2)
self.assertTrue(g1 in context['event_list'])
self.assertTrue(g2 in context['event_list'])
class SentryHelpersTest(TestCase):
def test_get_db_engine(self):
from sentry.utils.db import get_db_engine
_databases = getattr(django_settings, 'DATABASES', {}).copy()
django_settings.DATABASES['default'] = {'ENGINE': 'blah.sqlite3'}
self.assertEquals(get_db_engine(), 'sqlite3')
django_settings.DATABASES['default'] = {'ENGINE': 'blah.mysql'}
self.assertEquals(get_db_engine(), 'mysql')
django_settings.DATABASES = _databases
def test_get_login_url(self):
with self.Settings(LOGIN_URL='/really-a-404'):
url = get_login_url(True)
self.assertEquals(url, reverse('sentry-login'))
with self.Settings(LOGIN_URL=reverse('sentry-fake-login')):
url = get_login_url(True)
self.assertEquals(url, reverse('sentry-fake-login'))
# should still be cached
with self.Settings(LOGIN_URL='/really-a-404'):
url = get_login_url(False)
self.assertEquals(url, reverse('sentry-fake-login'))
with self.Settings(SENTRY_LOGIN_URL=None):
url = get_login_url(True)
self.assertEquals(url, reverse('sentry-login'))
|
bsd-3-clause
| -5,759,128,192,834,405,000
| 41.647383
| 148
| 0.63284
| false
| 3.800884
| true
| false
| false
|
ocefpaf/pycsw
|
pycsw/plugins/outputschemas/__init__.py
|
1
|
1360
|
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
__all__ = ['atom', 'dif', 'fgdc', 'gm03']
|
mit
| 4,767,057,192,456,334,000
| 42.870968
| 67
| 0.677206
| false
| 4.401294
| false
| false
| false
|
BrewCenter/BrewCenterAPI
|
brewcenter_api/brew_data/data_miner/brew_target/yeast.py
|
1
|
3040
|
from brew_data.data_miner.brew_target.utils import clean
class Yeast:
def __init__(self, data):
self.name = data[0]
self.type = data[1]
self.form = data[2]
self.lab = data[3]
self.min_temp = data[4]
self.max_temp = data[5]
self.flocculation = data[6]
self.attenuation = data[7]
self.notes = clean(data[8])
self.transform()
def transform(self):
self.name = '"' + self.name + '"'
self.type = '"' + self.type + '"'
self.lab = '"' + self.lab + '"'
self.flocculation = '"' + self.flocculation + '"'
# convert "None" notes to empty
if self.notes is None:
self.notes = '""'
else:
self.notes = '"' + self.notes + '"'
self.is_liquid = 0
if self.form == "Liquid":
self.is_liquid = 1
    @staticmethod
    def get_keys():
return ("name, type_id, is_liquid, lab, min_temp, max_temp, "
"flocculation, attenuation, notes")
def __str__(self):
return "{0},{1},{2},{3},{4},{5},{6},{7},{8}".format(
self.name,
self.type_id,
self.is_liquid,
self.lab,
self.min_temp,
self.max_temp,
self.flocculation,
self.attenuation,
self.notes,
)
def get_yeast(s, d, stdout):
"""
Gets yeast from the source database (s), transforms them,
and puts them in the destination database (d)
"""
n = 0
d.execute('DROP TABLE IF EXISTS yeasttype;')
d.execute('DROP TABLE IF EXISTS yeast;')
d.execute('CREATE TABLE yeasttype(name TEXT);')
d.execute('CREATE TABLE yeast(' \
'name TEXT,' \
'type_id int,' \
'is_liquid int,' \
'lab TEXT,' \
'min_temp FLOAT,' \
'max_temp FLOAT,' \
'flocculation FLOAT,' \
'attenuation FLOAT,' \
'notes TEXT' \
');'
)
s.execute('SELECT "name", "ytype", "form", "laboratory", "min_temperature", "max_temperature", "flocculation", "attenuation", "notes" FROM yeast WHERE `deleted`=0;')
cur = s.fetchone()
while cur:
y = Yeast(cur)
        # check for the yeast type and set its foreign id
y.type_id = 'NULL'
        if y.type != 'NULL':
d.execute('SELECT `rowid` FROM yeasttype WHERE name={0};'.format(y.type))
yeast_type_id = d.fetchone()
if yeast_type_id is None:
d.execute('INSERT INTO yeasttype(name) VALUES ({0});'.format(y.type))
d.execute('SELECT `rowid` FROM yeasttype WHERE name={0};'.format(y.type))
yeast_type_id = d.fetchone()
y.type_id = yeast_type_id[0] if yeast_type_id else 'NULL'
d.execute('INSERT INTO yeast({0}) VALUES({1});'.format(Yeast.get_keys(), y))
n+=1
cur = s.fetchone()
print("Found {0} yeast.".format(n))
|
gpl-3.0
| -3,451,735,403,440,097,000
| 31.688172
| 170
| 0.500329
| false
| 3.543124
| false
| false
| false
|
TataneInYourFace/wefill
|
app/forms/order_form.py
|
1
|
1471
|
# -*- coding: utf-8 -*-
import datetime
from django import forms
GAS_QUANTITY = (
('20', '20 Litres'),
('25', '25 Litres'),
('30', '30 Litres'),
('35', '35 Litres'),
('40', '40 Litres'),
('45', '45 Litres'),
('50', '50 Litres'),
('55', '55 Litres'),
('60', '60 Litres'),
)
class OrderForm(forms.Form):
user = forms.IntegerField(widget=forms.HiddenInput(), required=False)
address = forms.ChoiceField()
vehicle = forms.ChoiceField()
gas_name = forms.ChoiceField()
gas_quantity = forms.ChoiceField(widget=forms.Select(attrs={'class':'selectpicker'}), choices=GAS_QUANTITY)
date_refill = forms.DateTimeField(widget=forms.HiddenInput())
def __init__(self, data=None, addresses=None, vehicles=None, gas_choices=None, *args, **kwargs):
super(OrderForm, self).__init__(data, *args, **kwargs)
if addresses is not None:
self.fields['address'] = forms.ChoiceField(
choices=[(str(address['id']), address['name']) for address in addresses]
)
if vehicles is not None:
self.fields['vehicle'] = forms.ChoiceField(
choices=[(str(vehicle['id']), vehicle['name']) for vehicle in vehicles]
)
if gas_choices is not None:
self.fields['gas_name'] = forms.ChoiceField(
choices=[(gas['name'], '{0} - {1} €/L'.format(gas['name'], gas['price'])) for gas in gas_choices]
)
|
mit
| 3,933,943,561,269,895,700
| 33.97619
| 113
| 0.577263
| false
| 3.497619
| false
| false
| false
|
CoBiG2/RAD_Tools
|
segregating_loci_finder.py
|
1
|
3130
|
#!/usr/bin/env python3
# Copyright 2018 Francisco Pina Martins <f.pinamartins@gmail.com>
# segregating_loci_finder.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Loci_counter is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Loci_counter. If not, see <http://www.gnu.org/licenses/>.
# This script will compare two groups of individuals and highlight any
# loci that segregate both groups
# Usage: python3 segregating_loci_finder.py /path/to/file.vcf \
# number_of_1st_group_individuals(int)
import re
from collections import Counter
def vcf_parser(vcf_filename, group_split_point):
"""
    Parses a VCF file and returns a dict mapping each locus to a list with
    the genotype frequencies of the two groups of individuals.
"""
infile = open(vcf_filename, 'r')
loci = {}
group_split_point = int(group_split_point)
for lines in infile:
if lines.startswith("#"): # Skip headers
if lines.startswith("#CHROM"): # Group checker lines
lines = lines.split()
data = lines[9:]
groups = [data[:group_split_point],
data[group_split_point:]]
print(groups)
else:
lines = lines.split()
locus = lines[0]
data = lines[9:]
groups = [data[:group_split_point], data[group_split_point:]]
gr_freqs = [get_freqs(x) for x in groups]
loci[locus] = gr_freqs
return loci
def get_freqs(vcf_data):
"""
Gets relative frequencies from VCF data
"""
abs_freqs = [re.match(".*?:", x).group(0)[:-1] for x in vcf_data]
dummy_freqs = {"0/0": 0, "0/1": 0, "1/0": 0, "1/1": 0, "./.": 0}
rel_freqs = Counter(abs_freqs)
try:
mvs = rel_freqs.pop("./.")
except KeyError:
mvs = 0
dummy_freqs.update(Counter(abs_freqs))
rel_freqs = dummy_freqs
rel_freqs["0/1"] += rel_freqs.pop("1/0")
try:
non_missing = len(abs_freqs) - mvs
rel_freqs = {k: v/non_missing for k, v in rel_freqs.items()}
except ZeroDivisionError:
rel_freqs = None
# print(rel_freqs)
return rel_freqs
def segregating_freqs(loci):
"""
    Determines whether a locus segregates the two groups
For now only works with full segregation
"""
segregators = []
for locus, data in loci.items():
try:
segregators += [locus for k, v in data[0].items()
if (data[1][k] == 0 and v == 1)
or (data[1][k] == 1 and v == 0)]
except AttributeError:
pass
return segregators
if __name__ == "__main__":
from sys import argv
SEG_LOCI = vcf_parser(argv[1], argv[2])
for i in segregating_freqs(SEG_LOCI):
print(i)
|
gpl-3.0
| -5,112,938,407,795,405,000
| 30.938776
| 84
| 0.602875
| false
| 3.552781
| false
| false
| false
|
xavi783/u-tad
|
Modulo8/modules/data/main.py
|
1
|
1377
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 28 17:28:20 2014
@author: xavi783
"""
import json
import numpy as np
import pandas as pd
import pandas.io.data as web
import datetime as dt
from tornado.web import RequestHandler
START_DATE=dt.datetime(2000,1,1)
NAMES = ['AAPL','XOM','MSFT','JNJ','BRK.B','WFC','GE','PG','JPM','PFE']
symbols = pd.concat([web.get_data_yahoo(i, START_DATE)['Adj Close'] for i in NAMES],1)
symbols.columns = NAMES
symbols.index = [i.date() for i in list(symbols.index)]
symbols.index.names = ["date"]
panel_corr = pd.rolling_corr(symbols.pct_change(),21)
dates = np.array(map(lambda d: d.toordinal(), symbols.index))
class StockHandler(RequestHandler):
def get(self):
self.write(symbols.to_csv())
self.finish()
class CorrelationHandler(RequestHandler):
encoder = json.JSONEncoder()
def get_correlation(self,*date):
f = lambda x: x[x<0][-1];
find_date = lambda d,dates: list(np.argwhere(f((dates-dt.datetime(*d).toordinal()))==(dates-dt.datetime(*d).toordinal())).flat)[0]
get_date = lambda d,dates: symbols.ix[find_date(d,dates)+[1,2],:].index[0]
return json.dumps((panel_corr[get_date(date,dates)].values).tolist())
def post(self):
fecha = tuple([int(i) for i in self.request.body.split("-")])
self.write(self.encoder.encode(self.get_correlation(*fecha)))
|
gpl-3.0
| -2,924,266,324,681,359,400
| 31.809524
| 138
| 0.660857
| false
| 3.053215
| false
| false
| false
|
phiros/nepi
|
src/nepi/resources/ns3/ns3propagationdelaymodel.py
|
1
|
1830
|
#
# NEPI, a framework to manage network experiments
# Copyright (C) 2014 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
from nepi.execution.resource import clsinit_copy
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
class NS3BasePropagationDelayModel(NS3Base):
_rtype = "abstract::ns3::PropagationDelayModel"
@property
def simulation(self):
return self.channel.simulation
@property
def channel(self):
from nepi.resources.ns3.ns3wifichannel import NS3BaseWifiChannel
channels = self.get_connected(NS3BaseWifiChannel.get_rtype())
if not channels:
msg = "PropagationDelayModel not connected to channel"
self.error(msg)
            raise RuntimeError(msg)
return channels[0]
@property
def _rms_to_wait(self):
others = set()
others.add(self.channel)
return others
def _connect_object(self):
channel = self.channel
if channel.uuid not in self.connected:
self.simulation.invoke(channel.uuid, "SetPropagationDelayModel", self.uuid)
self._connected.add(channel.uuid)
|
gpl-3.0
| -5,356,266,568,432,127,000
| 32.888889
| 87
| 0.693443
| false
| 3.893617
| false
| false
| false
|
tlambert03/grouper
|
test.py
|
1
|
1056
|
import grouper as G
from multiprocessing import Pool
import sys
cores = 8 if len(sys.argv) < 2 else int(float(sys.argv[1]))
iterations_per_thread = 1000 if len(sys.argv) < 3 else int(float(sys.argv[2]))
n = G.params.numrotations
stations = G.params.stations
if __name__ == '__main__':
p = Pool(processes=cores)
results = p.map(G.parallel_shuffle, [(stations,n,iterations_per_thread,iterations_per_thread/10)]*cores)
print
print
print "--------------"
print 'BEST RESULT:'
print
bench = float("inf")
for sol in results:
s = G.scoreSolution(sol)
gs = sum([g[0] for g in s])
rs = sum([g[1] for g in s])
cs = gs * rs
if cs < bench:
bench = cs
best = sol
print best.printSol()
s = G.scoreSolution(best)
gs = sum([g[0] for g in s])
rs = sum([g[1] for g in s])
cs = gs * rs
print sol.part
print "Partition Score: ", gs
print sol.schedule
print "Rotation Score: ", rs
print "Combo Score: ", cs
print "--------------"
|
mit
| -1,905,808,509,131,380,000
| 26.102564
| 108
| 0.569129
| false
| 3.28972
| false
| false
| false
|
tschalch/pyTray
|
src/gui/error_window.py
|
1
|
2153
|
#!/usr/bin/env python2.3
"""
Error window that pops up and displays unhandled errors
"""
from wx.lib.dialogs import *
import wx
import sys, traceback
class ErrorDialog(wx.Dialog):
def __init__(self, parent, msg, caption,
pos=wx.DefaultPosition, size=(500,300),
style=wx.DEFAULT_DIALOG_STYLE):
wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
x, y = pos
if x == -1 and y == -1:
self.CenterOnScreen(wx.BOTH)
self.text = wx.TextCtrl(self, -1, msg,
style=wx.TE_MULTILINE | wx.TE_READONLY)
okID = wx.NewId()
ok = wx.Button(self, okID, "OK")
self.Bind(wx.EVT_BUTTON, self.OnButton, ok)
self.Bind(wx.EVT_CLOSE, self.OnButton)
ok.SetDefault()
lc = layoutf.Layoutf('t=t5#1;b=t5#2;l=l5#1;r=r5#1', (self,ok))
self.text.SetConstraints(lc)
lc = layoutf.Layoutf('b=b5#1;x%w50#1;w!80;h*', (self,))
ok.SetConstraints(lc)
self.SetAutoLayout(1)
self.Layout()
def OnButton(self, event):
self.Destroy()
def write(self, msg):
self.text.AppendText(msg)
class ErrorHandler:
def __init__(self):
self.dialog = None
def write(self, msg):
try:
if not self.dialog:
self.dialog = ErrorDialog(None, "Ooops, this looks like bug! Please send the error message to schalch@mol.biol.ethz.ch\n\n", "ErrorWindow")
self.dialog.Show()
if not self.dialog.IsShown():
self.dialog = ErrorDialog(None, "Error:", "ErrorWindow")
self.dialog.Show()
self.dialog.write(msg)
except:
sys.stderr = sys.__stderr__
print traceback.print_exc(file=sys.stdout)
raise
class GuiApp(wx.App):
def OnInit(self):
return True
if __name__ == "__main__":
app = GuiApp(0)
app.MainLoop()
hdl = ErrorHandler()
hdl.write("Test")
|
bsd-3-clause
| 2,089,554,167,221,337,300
| 29.691176
| 155
| 0.51974
| false
| 3.523732
| false
| false
| false
|
mgrygoriev/CloudFerry
|
cloudferrylib/os/discovery/stages.py
|
1
|
2752
|
# Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_utils import importutils
from cloudferrylib import stage
from cloudferrylib.os.discovery import model
LOG = logging.getLogger(__name__)
class DiscoverStage(stage.Stage):
def __init__(self):
super(DiscoverStage, self).__init__()
self.missing_clouds = None
def invalidate(self, old_signature, new_signature, force=False):
"""
Remove data related to any cloud that changed signature.
"""
if force:
with model.Session() as session:
session.delete()
return
self.missing_clouds = []
        # Create a set of cloud names whose data is no longer valid
old_clouds = set(old_signature.keys())
invalid_clouds = old_clouds.difference(new_signature.keys())
for name, signature in new_signature.items():
if name not in old_signature:
self.missing_clouds.append(name)
continue
if old_signature[name] != signature:
self.missing_clouds.append(name)
invalid_clouds.add(name)
with model.Session() as session:
for cloud in invalid_clouds:
session.delete(cloud=cloud)
def signature(self, config):
"""
Discovery signature is based on configuration. Each configured cloud
        has its own signature.
"""
return {n: [c.credential.auth_url, c.credential.region_name]
for n, c in config.clouds.items()}
def execute(self, config):
"""
Execute discovery.
"""
if self.missing_clouds is None:
self.missing_clouds = config.clouds.keys()
for cloud_name in self.missing_clouds:
cloud = config.clouds[cloud_name]
for class_name in cloud.discover:
cls = importutils.import_class(class_name)
LOG.info('Starting discover %s objects in %s cloud',
cls.__name__, cloud_name)
cls.discover(cloud)
LOG.info('Done discovering %s objects in %s cloud',
cls.__name__, cloud_name)
|
apache-2.0
| -3,448,828,013,168,188,400
| 34.282051
| 76
| 0.616642
| false
| 4.417335
| true
| false
| false
|
daatrujillopu/Sfotipy
|
actions.py
|
1
|
1933
|
__author__ = 'danny'
import csv
import logging
import tablib
from datetime import datetime
from django.db.models import Model
from django.db.models.fields.files import FieldFile
from unicodedata import normalize
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.template import Context, Template
from django.conf import settings
from django.core.urlresolvers import reverse
def export_as_excel(modeladmin, request, queryset):
if not request.user.is_staff:
raise PermissionDenied
opts = modeladmin.model._meta
response = HttpResponse(content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=%s.xls' % str(opts).replace('.', '_')
try:
field_names = modeladmin.model.get_csv_fields()
v_field_names = field_names
except:
field_names = [field.name for field in opts.fields]
v_field_names = [getattr(field, 'verbose_name') or field.name for field in opts.fields]
v_field_names = map(lambda x: x if x != 'ID' else 'Id', v_field_names)
ax = []
headers = v_field_names
data = []
data = tablib.Dataset(*data, headers=headers)
for obj in queryset:
acc = []
for field in field_names:
try:
uf = getattr(obj, field)()
except TypeError:
try:
uf = getattr(obj, field)
except:
uf = ' error obteniendo el dato'
if uf is None:
uf = ''
elif isinstance(uf, datetime):
uf = str(uf)
elif isinstance(uf, Model):
uf = str(uf)
elif isinstance(uf, FieldFile):
uf = str(uf.url)
acc.append(uf)
data.append(acc)
response.write(data.xls)
return response
export_as_excel.short_description = "Exportar como Excel"
|
mit
| -5,425,386,611,052,412,000
| 31.233333
| 97
| 0.612519
| false
| 3.985567
| false
| false
| false
|
stefan-jonasson/home-assistant
|
homeassistant/components/zwave/node_entity.py
|
1
|
8320
|
"""Entity class that represents Z-Wave node."""
import logging
from homeassistant.core import callback
from homeassistant.const import ATTR_BATTERY_LEVEL, ATTR_WAKEUP, ATTR_ENTITY_ID
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from .const import (
ATTR_NODE_ID, COMMAND_CLASS_WAKE_UP, ATTR_SCENE_ID, ATTR_SCENE_DATA,
ATTR_BASIC_LEVEL, EVENT_NODE_EVENT, EVENT_SCENE_ACTIVATED, DOMAIN,
COMMAND_CLASS_CENTRAL_SCENE)
from .util import node_name
_LOGGER = logging.getLogger(__name__)
ATTR_QUERY_STAGE = 'query_stage'
ATTR_AWAKE = 'is_awake'
ATTR_READY = 'is_ready'
ATTR_FAILED = 'is_failed'
ATTR_PRODUCT_NAME = 'product_name'
ATTR_MANUFACTURER_NAME = 'manufacturer_name'
ATTR_NODE_NAME = 'node_name'
STAGE_COMPLETE = 'Complete'
_REQUIRED_ATTRIBUTES = [
ATTR_QUERY_STAGE, ATTR_AWAKE, ATTR_READY, ATTR_FAILED,
'is_info_received', 'max_baud_rate', 'is_zwave_plus']
_OPTIONAL_ATTRIBUTES = ['capabilities', 'neighbors', 'location']
_COMM_ATTRIBUTES = [
'sentCnt', 'sentFailed', 'retries', 'receivedCnt', 'receivedDups',
'receivedUnsolicited', 'sentTS', 'receivedTS', 'lastRequestRTT',
'averageRequestRTT', 'lastResponseRTT', 'averageResponseRTT']
ATTRIBUTES = _REQUIRED_ATTRIBUTES + _OPTIONAL_ATTRIBUTES
class ZWaveBaseEntity(Entity):
"""Base class for Z-Wave Node and Value entities."""
def __init__(self):
"""Initialize the base Z-Wave class."""
self._update_scheduled = False
self.old_entity_id = None
self.new_entity_id = None
def maybe_schedule_update(self):
"""Maybe schedule state update.
If value changed after device was created but before setup_platform
was called - skip updating state.
"""
if self.hass and not self._update_scheduled:
self.hass.add_job(self._schedule_update)
@callback
def _schedule_update(self):
"""Schedule delayed update."""
if self._update_scheduled:
return
@callback
def do_update():
"""Really update."""
self.hass.async_add_job(self.async_update_ha_state)
self._update_scheduled = False
self._update_scheduled = True
self.hass.loop.call_later(0.1, do_update)
def sub_status(status, stage):
"""Format sub-status."""
return '{} ({})'.format(status, stage) if stage else status
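# Example: sub_status('Sleeping', 'Probe') -> 'Sleeping (Probe)', while
# sub_status('Ready', '') -> 'Ready' (the stage name here is illustrative).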
class ZWaveNodeEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node."""
def __init__(self, node, network, new_entity_ids):
"""Initialize node."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self._network = network
self.node = node
self.node_id = self.node.node_id
self._name = node_name(self.node)
self._product_name = node.product_name
self._manufacturer_name = node.manufacturer_name
self.old_entity_id = "{}.{}_{}".format(
DOMAIN, slugify(self._name), self.node_id)
self.new_entity_id = "{}.{}".format(DOMAIN, slugify(self._name))
if not new_entity_ids:
self.entity_id = self.old_entity_id
self._attributes = {}
self.wakeup_interval = None
self.location = None
self.battery_level = None
dispatcher.connect(
self.network_node_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NODE)
dispatcher.connect(
self.network_node_changed, ZWaveNetwork.SIGNAL_NOTIFICATION)
dispatcher.connect(
self.network_node_event, ZWaveNetwork.SIGNAL_NODE_EVENT)
dispatcher.connect(
self.network_scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT)
def network_node_changed(self, node=None, value=None, args=None):
"""Handle a changed node on the network."""
if node and node.node_id != self.node_id:
return
if args is not None and 'nodeId' in args and \
args['nodeId'] != self.node_id:
return
# Process central scene activation
if (value is not None and
value.command_class == COMMAND_CLASS_CENTRAL_SCENE):
self.central_scene_activated(value.index, value.data)
self.node_changed()
def get_node_statistics(self):
"""Retrieve statistics from the node."""
return self._network.manager.getNodeStatistics(
self._network.home_id, self.node_id)
def node_changed(self):
"""Update node properties."""
attributes = {}
stats = self.get_node_statistics()
for attr in ATTRIBUTES:
value = getattr(self.node, attr)
if attr in _REQUIRED_ATTRIBUTES or value:
attributes[attr] = value
for attr in _COMM_ATTRIBUTES:
attributes[attr] = stats[attr]
if self.node.can_wake_up():
for value in self.node.get_values(COMMAND_CLASS_WAKE_UP).values():
self.wakeup_interval = value.data
break
else:
self.wakeup_interval = None
self.battery_level = self.node.get_battery_level()
self._attributes = attributes
self.maybe_schedule_update()
def network_node_event(self, node, value):
"""Handle a node activated event on the network."""
if node.node_id == self.node.node_id:
self.node_event(value)
def node_event(self, value):
"""Handle a node activated event for this node."""
if self.hass is None:
return
self.hass.bus.fire(EVENT_NODE_EVENT, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node.node_id,
ATTR_BASIC_LEVEL: value
})
def network_scene_activated(self, node, scene_id):
"""Handle a scene activated event on the network."""
if node.node_id == self.node.node_id:
self.scene_activated(scene_id)
def scene_activated(self, scene_id):
"""Handle an activated scene for this node."""
if self.hass is None:
return
self.hass.bus.fire(EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node.node_id,
ATTR_SCENE_ID: scene_id
})
def central_scene_activated(self, scene_id, scene_data):
"""Handle an activated central scene for this node."""
if self.hass is None:
return
self.hass.bus.fire(EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node_id,
ATTR_SCENE_ID: scene_id,
ATTR_SCENE_DATA: scene_data
})
@property
def state(self):
"""Return the state."""
if ATTR_READY not in self._attributes:
return None
stage = ''
if not self._attributes[ATTR_READY]:
# If node is not ready use stage as sub-status.
stage = self._attributes[ATTR_QUERY_STAGE]
if self._attributes[ATTR_FAILED]:
return sub_status('Dead', stage)
if not self._attributes[ATTR_AWAKE]:
return sub_status('Sleeping', stage)
if self._attributes[ATTR_READY]:
return sub_status('Ready', stage)
return stage
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_NODE_ID: self.node_id,
ATTR_NODE_NAME: self._name,
ATTR_MANUFACTURER_NAME: self._manufacturer_name,
ATTR_PRODUCT_NAME: self._product_name,
'old_entity_id': self.old_entity_id,
'new_entity_id': self.new_entity_id,
}
attrs.update(self._attributes)
if self.battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = self.battery_level
if self.wakeup_interval is not None:
attrs[ATTR_WAKEUP] = self.wakeup_interval
return attrs
|
mit
| 3,290,718,244,363,379,000
| 33.238683
| 79
| 0.608053
| false
| 3.873371
| false
| false
| false
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/topological.py
|
1
|
11300
|
# topological.py
# Copyright (C) 2005, 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Topological sorting algorithms.
The topological sort is an algorithm that receives this list of
dependencies as a *partial ordering*, that is a list of pairs which
might say, *X is dependent on Y*, *Q is dependent on Z*, but does not
necessarily tell you anything about Q being dependent on X. Therefore,
its not a straight sort where every element can be compared to
another... only some of the elements have any sorting preference, and
then only towards just some of the other elements. For a particular
partial ordering, there can be many possible sorts that satisfy the
conditions.
"""
from sqlalchemy import util
from sqlalchemy.exceptions import CircularDependencyError
class _Node(object):
"""Represent each item in the sort.
While the topological sort produces a straight ordered list of
items, ``_Node`` ultimately stores a tree-structure of those items
which are organized so that non-dependent nodes are siblings.
"""
def __init__(self, item):
self.item = item
self.dependencies = util.Set()
self.children = []
self.cycles = None
def __str__(self):
return self.safestr()
def safestr(self, indent=0):
return (' ' * indent * 2) + \
str(self.item) + \
(self.cycles is not None and (" (cycles: " + repr([x for x in self.cycles]) + ")") or "") + \
"\n" + \
''.join([n.safestr(indent + 1) for n in self.children])
def __repr__(self):
return "%s" % (str(self.item))
def all_deps(self):
"""Return a set of dependencies for this node and all its cycles."""
deps = util.Set(self.dependencies)
if self.cycles is not None:
for c in self.cycles:
deps.update(c.dependencies)
return deps
class _EdgeCollection(object):
"""A collection of directed edges."""
def __init__(self):
self.parent_to_children = {}
self.child_to_parents = {}
def add(self, edge):
"""Add an edge to this collection."""
(parentnode, childnode) = edge
if parentnode not in self.parent_to_children:
self.parent_to_children[parentnode] = util.Set()
self.parent_to_children[parentnode].add(childnode)
if childnode not in self.child_to_parents:
self.child_to_parents[childnode] = util.Set()
self.child_to_parents[childnode].add(parentnode)
parentnode.dependencies.add(childnode)
def remove(self, edge):
"""Remove an edge from this collection.
Return the childnode if it has no other parents.
"""
(parentnode, childnode) = edge
self.parent_to_children[parentnode].remove(childnode)
self.child_to_parents[childnode].remove(parentnode)
if len(self.child_to_parents[childnode]) == 0:
return childnode
else:
return None
def has_parents(self, node):
return node in self.child_to_parents and len(self.child_to_parents[node]) > 0
def edges_by_parent(self, node):
if node in self.parent_to_children:
return [(node, child) for child in self.parent_to_children[node]]
else:
return []
def get_parents(self):
return self.parent_to_children.keys()
def pop_node(self, node):
"""Remove all edges where the given node is a parent.
Return the collection of all nodes which were children of the
given node, and have no further parents.
"""
children = self.parent_to_children.pop(node, None)
if children is not None:
for child in children:
self.child_to_parents[child].remove(node)
if not self.child_to_parents[child]:
yield child
def __len__(self):
return sum([len(x) for x in self.parent_to_children.values()])
def __iter__(self):
for parent, children in self.parent_to_children.iteritems():
for child in children:
yield (parent, child)
def __str__(self):
return repr(list(self))
def __repr__(self):
return repr(list(self))
class QueueDependencySorter(object):
"""Topological sort adapted from wikipedia's article on the subject.
It creates a straight-line list of elements, then a second pass
batches non-dependent elements as siblings in a tree structure. Future
versions of this algorithm may separate the "convert to a tree"
step.
"""
def __init__(self, tuples, allitems):
self.tuples = tuples
self.allitems = allitems
def sort(self, allow_self_cycles=True, allow_all_cycles=False):
(tuples, allitems) = (self.tuples, self.allitems)
#print "\n---------------------------------\n"
#print repr([t for t in tuples])
#print repr([a for a in allitems])
#print "\n---------------------------------\n"
nodes = {}
edges = _EdgeCollection()
for item in allitems + [t[0] for t in tuples] + [t[1] for t in tuples]:
if item not in nodes:
node = _Node(item)
nodes[item] = node
for t in tuples:
if t[0] is t[1]:
if allow_self_cycles:
n = nodes[t[0]]
n.cycles = util.Set([n])
continue
else:
raise CircularDependencyError("Self-referential dependency detected " + repr(t))
childnode = nodes[t[1]]
parentnode = nodes[t[0]]
edges.add((parentnode, childnode))
queue = []
for n in nodes.values():
if not edges.has_parents(n):
queue.append(n)
output = []
while nodes:
if not queue:
# edges remain but no edgeless nodes to remove; this indicates
# a cycle
if allow_all_cycles:
for cycle in self._find_cycles(edges):
lead = cycle[0][0]
lead.cycles = util.Set()
for edge in cycle:
n = edges.remove(edge)
lead.cycles.add(edge[0])
lead.cycles.add(edge[1])
if n is not None:
queue.append(n)
for n in lead.cycles:
if n is not lead:
n._cyclical = True
for (n,k) in list(edges.edges_by_parent(n)):
edges.add((lead, k))
edges.remove((n,k))
continue
else:
# long cycles not allowed
raise CircularDependencyError("Circular dependency detected " + repr(edges) + repr(queue))
node = queue.pop()
if not hasattr(node, '_cyclical'):
output.append(node)
del nodes[node.item]
for childnode in edges.pop_node(node):
queue.append(childnode)
return self._create_batched_tree(output)
def _create_batched_tree(self, nodes):
"""Given a list of nodes from a topological sort, organize the
nodes into a tree structure, with as many non-dependent nodes
set as siblings to each other as possible.
"""
if not nodes:
return None
# a list of all currently independent subtrees as a tuple of
# (root_node, set_of_all_tree_nodes, set_of_all_cycle_nodes_in_tree)
        # order of the list has no semantics for the algorithm
independents = []
# in reverse topological order
for node in util.reversed(nodes):
            # node's subtree and cycles contain the node itself
subtree = util.Set([node])
if node.cycles is not None:
cycles = util.Set(node.cycles)
else:
cycles = util.Set()
# get a set of dependent nodes of node and its cycles
nodealldeps = node.all_deps()
if nodealldeps:
# iterate over independent node indexes in reverse order so we can efficiently remove them
for index in xrange(len(independents)-1,-1,-1):
child, childsubtree, childcycles = independents[index]
# if there is a dependency between this node and an independent node
if (childsubtree.intersection(nodealldeps) or childcycles.intersection(node.dependencies)):
                        # prepend child to node's children
                        # (append should be fine, but previous implementation used prepend)
                        node.children[0:0] = (child,)
                        # merge child's subtree and cycles
subtree.update(childsubtree)
cycles.update(childcycles)
# remove the child from list of independent subtrees
independents[index:index+1] = []
# add node as a new independent subtree
independents.append((node,subtree,cycles))
# choose an arbitrary node from list of all independent subtrees
head = independents.pop()[0]
# add all other independent subtrees as a child of the chosen root
# used prepend [0:0] instead of extend to maintain exact behaviour of previous implementation
head.children[0:0] = [i[0] for i in independents]
return head
def _find_cycles(self, edges):
involved_in_cycles = util.Set()
cycles = {}
def traverse(node, goal=None, cycle=None):
if goal is None:
goal = node
cycle = []
elif node is goal:
return True
for (n, key) in edges.edges_by_parent(node):
if key in cycle:
continue
cycle.append(key)
if traverse(key, goal, cycle):
cycset = util.Set(cycle)
for x in cycle:
involved_in_cycles.add(x)
if x in cycles:
existing_set = cycles[x]
[existing_set.add(y) for y in cycset]
for y in existing_set:
cycles[y] = existing_set
cycset = existing_set
else:
cycles[x] = cycset
cycle.pop()
for parent in edges.get_parents():
traverse(parent)
# sets are not hashable, so uniquify with id
unique_cycles = dict([(id(s), s) for s in cycles.values()]).values()
for cycle in unique_cycles:
edgecollection = [edge for edge in edges
if edge[0] in cycle and edge[1] in cycle]
yield edgecollection
|
bsd-3-clause
| 3,143,276,866,390,748,000
| 37.69863
| 111
| 0.549115
| false
| 4.448819
| false
| false
| false
|
gugarosa/LibKeras
|
train.py
|
1
|
3047
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import common
import gzip
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle as pkl
import sys
from data import load_data
from itertools import product
from os.path import join
from sklearn.model_selection import train_test_split
# Default directory
root_out = common.default_path() + '/outputs'
# Type of data entries
n_runs = 1
datasets = [{'name': 'brain', 'n_classes': 2}]
data_types = [{'type': 'matlab', 'ref': False}]
normalization_methods = ['none']
test_sizes = [0.2]
params = list(product(datasets, data_types, normalization_methods, test_sizes))
# Fixed parameters
learning_rate = 0.01
momentum = 0.9
decay = 0.0005
nesterov = True
batch_size = 32
n_epochs = 90
val_size = 0.1
metric = 'accuracy'
loss_func = 'categorical_crossentropy'
results = []
# Loop to hold all the desired configurations
for d, dt, nm, ts in params:
for i in range(n_runs):
data, labels = load_data(d['name'], dt['type'], dt['ref'], nm)
input_shape = data.shape[1:]
# Splitting data into training and test sets
data_train, data_test, lab_train, lab_test = train_test_split(data, labels, test_size=ts, random_state=i)
# Building CNN, note that you can choose the build function according to common.py
cnet = common.ConvNet()
cnet.build_samplenet(include_top=True, weights=None, input_shape=input_shape, classes=d['n_classes'])
# Compiling current network
cnet.compile(learning_rate=learning_rate, momentum=momentum, decay=decay, nesterov=nesterov, metric=metric, loss_func=loss_func)
# Training current network
cnet.train(data_train, lab_train, d['n_classes'], batch_size=batch_size, n_epochs=n_epochs, validation_size=val_size, loss_func=loss_func)
# Evaluating current network
acc = cnet.evaluate(data_test, lab_test, d['n_classes'], batch_size)
# Saving network model
mname = '%s_model.h5' % (d['name'])
cnet.save_model(join(root_out, 'models', mname))
# Saving trained network weights
wname = '%s_%s_%s_%.2f_%02i.h5' % (d['name'], dt['type'], nm, ts, i)
cnet.save_weight(join(root_out, 'weights', wname))
# Plotting the accuracy history
history = cnet.get_history()
fname = '%s_%s_%s_%.2f_%02i' % (d['name'], dt['type'], nm, ts, i)
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.savefig(join(root_out, 'history', 'history_' + fname + '.jpg'))
plt.close()
# Dumping history to a .pkl file
pkl.dump(history, gzip.open(join(root_out, 'history', 'history_' + fname + '.pkl'), 'wb'))
# Saving results output on a .csv file
results.append([d['name'], dt['type'], nm, ts, i, acc])
cnet = None
df = pd.DataFrame(results, columns=['dataset', 'data_type', 'normalization_method', 'test_size', 'running_num', 'acc'])
df.to_csv(join(root_out, 'results.csv'))
# End of current iteration
print("\n[INFO] Running #{:d} ok!".format(i))
import gc; gc.collect()
|
gpl-3.0
| -7,683,564,740,032,855,000
| 32.119565
| 140
| 0.685921
| false
| 2.955383
| true
| false
| false
|
juanyunis/django-resumes
|
resumes/admin.py
|
1
|
1253
|
from django.contrib import admin
from models import *
class UserResumeEducationInline(admin.StackedInline):
model = UserResumeEducation
extra = 1
class UserResumeLanguageInline(admin.StackedInline):
model = UserResumeLanguage
extra = 1
class UserResumeInterestInline(admin.StackedInline):
model = UserResumeInterest
extra = 1
class UserResumeQualificationInline(admin.StackedInline):
model = UserResumeQualification
extra = 1
class UserResumeJobInline(admin.StackedInline):
model = UserResumeJob
extra = 1
class UserResumeReferenceInline(admin.StackedInline):
model = UserResumeReferences
extra = 1
class UserResumeAdmin(admin.ModelAdmin):
inlines = [
UserResumeEducationInline, UserResumeLanguageInline,
UserResumeInterestInline, UserResumeQualificationInline,
UserResumeJobInline, UserResumeReferenceInline
]
admin.site.register(Company)
admin.site.register(School)
admin.site.register(UserResume, UserResumeAdmin)
admin.site.register(UserResumeEducation)
admin.site.register(UserResumeLanguage)
admin.site.register(UserResumeInterest)
admin.site.register(UserResumeQualification)
admin.site.register(UserResumeJob)
admin.site.register(UserResumeReferences)
|
mit
| -1,410,418,413,467,217,700
| 24.591837
| 64
| 0.794094
| false
| 3.915625
| false
| false
| false
|
rhd/meson
|
mesonbuild/backend/ninjabackend.py
|
1
|
122266
|
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import backends
from .. import modules
from .. import environment, mesonlib
from .. import build
from .. import mlog
from .. import dependencies
from .. import compilers
from ..compilers import CompilerArgs
from ..mesonlib import File, MesonException, OrderedSet
from ..mesonlib import get_meson_script, get_compiler_for_source
from .backends import CleanTrees, InstallData
from ..build import InvalidArguments
import os, sys, pickle, re
import subprocess, shutil
from collections import OrderedDict
if mesonlib.is_windows():
quote_char = '"'
execute_wrapper = 'cmd /c'
rmfile_prefix = 'del /f /s /q {} &&'
else:
quote_char = "'"
execute_wrapper = ''
rmfile_prefix = 'rm -f {} &&'
def ninja_quote(text):
for char in ('$', ' ', ':'):
text = text.replace(char, '$' + char)
if '\n' in text:
raise MesonException('Ninja does not support newlines in rules. '
'Please report this error with a test case to the Meson bug tracker.')
return text
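# For example (illustrative input): ninja_quote('c:/my dir/x.c') -> 'c$:/my$ dir/x.c';
# only '$', ' ' and ':' are escaped here, each by prefixing it with '$'.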
class NinjaBuildElement:
def __init__(self, all_outputs, outfilenames, rule, infilenames):
if isinstance(outfilenames, str):
self.outfilenames = [outfilenames]
else:
self.outfilenames = outfilenames
assert(isinstance(rule, str))
self.rule = rule
if isinstance(infilenames, str):
self.infilenames = [infilenames]
else:
self.infilenames = infilenames
self.deps = []
self.orderdeps = []
self.elems = []
self.all_outputs = all_outputs
def add_dep(self, dep):
if isinstance(dep, list):
self.deps += dep
else:
self.deps.append(dep)
def add_orderdep(self, dep):
if isinstance(dep, list):
self.orderdeps += dep
else:
self.orderdeps.append(dep)
def add_item(self, name, elems):
if isinstance(elems, str):
elems = [elems]
self.elems.append((name, elems))
def write(self, outfile):
self.check_outputs()
line = 'build %s: %s %s' % (
' '.join([ninja_quote(i) for i in self.outfilenames]),
self.rule,
' '.join([ninja_quote(i) for i in self.infilenames]))
if len(self.deps) > 0:
line += ' | ' + ' '.join([ninja_quote(x) for x in self.deps])
if len(self.orderdeps) > 0:
line += ' || ' + ' '.join([ninja_quote(x) for x in self.orderdeps])
line += '\n'
# This is the only way I could find to make this work on all
# platforms including Windows command shell. Slash is a dir separator
# on Windows, too, so all characters are unambiguous and, more importantly,
# do not require quoting.
line = line.replace('\\', '/')
outfile.write(line)
# All the entries that should remain unquoted
raw_names = {'DEPFILE', 'DESC', 'pool', 'description'}
for e in self.elems:
(name, elems) = e
should_quote = name not in raw_names
line = ' %s = ' % name
q_templ = quote_char + "%s" + quote_char
noq_templ = "%s"
newelems = []
for i in elems:
if not should_quote or i == '&&': # Hackety hack hack
templ = noq_templ
else:
templ = q_templ
i = i.replace('\\', '\\\\')
if quote_char == '"':
i = i.replace('"', '\\"')
newelems.append(templ % ninja_quote(i))
line += ' '.join(newelems)
line += '\n'
outfile.write(line)
outfile.write('\n')
def check_outputs(self):
for n in self.outfilenames:
if n in self.all_outputs:
raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % n)
self.all_outputs[n] = True
class NinjaBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.name = 'ninja'
self.ninja_filename = 'build.ninja'
self.target_arg_cache = {}
self.fortran_deps = {}
self.all_outputs = {}
def detect_vs_dep_prefix(self, tempfilename):
'''VS writes its dependency in a locale dependent format.
Detect the search prefix to use.'''
# Of course there is another program called 'cl' on
# some platforms. Let's just require that on Windows
# cl points to msvc.
if not mesonlib.is_windows() or shutil.which('cl') is None:
return open(tempfilename, 'a')
filename = os.path.join(self.environment.get_scratch_dir(),
'incdetect.c')
with open(filename, 'w') as f:
f.write('''#include<stdio.h>
int dummy;
''')
# The output of cl dependency information is language
# and locale dependent. Any attempt at converting it to
# Python strings leads to failure. We _must_ do this detection
# in raw byte mode and write the result in raw bytes.
pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
cwd=self.environment.get_scratch_dir(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdo, _) = pc.communicate()
# We want to match 'Note: including file: ' in the line
# 'Note: including file: d:\MyDir\include\stdio.h', however
# different locales have different messages with a different
        # number of colons. Match up to the drive name 'd:\'.
matchre = re.compile(rb"^(.*\s)[a-zA-Z]:\\.*stdio.h$")
for line in stdo.split(b'\r\n'):
match = matchre.match(line)
if match:
with open(tempfilename, 'ab') as binfile:
binfile.write(b'msvc_deps_prefix = ' + match.group(1) + b'\n')
return open(tempfilename, 'a')
        raise MesonException('Could not determine VS dependency prefix string.')
def generate(self, interp):
self.interpreter = interp
outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
tempfilename = outfilename + '~'
with open(tempfilename, 'w') as outfile:
outfile.write('# This is the build file for project "%s"\n' %
self.build.get_project())
outfile.write('# It is autogenerated by the Meson build system.\n')
outfile.write('# Do not edit by hand.\n\n')
outfile.write('ninja_required_version = 1.5.1\n\n')
with self.detect_vs_dep_prefix(tempfilename) as outfile:
self.generate_rules(outfile)
self.generate_phony(outfile)
outfile.write('# Build rules for targets\n\n')
for t in self.build.get_targets().values():
self.generate_target(t, outfile)
outfile.write('# Test rules\n\n')
self.generate_tests(outfile)
outfile.write('# Install rules\n\n')
self.generate_install(outfile)
self.generate_dist(outfile)
if 'b_coverage' in self.environment.coredata.base_options and \
self.environment.coredata.base_options['b_coverage'].value:
outfile.write('# Coverage rules\n\n')
self.generate_coverage_rules(outfile)
outfile.write('# Suffix\n\n')
self.generate_utils(outfile)
self.generate_ending(outfile)
        # Only overwrite the old build file after the new one has been
# fully created.
os.replace(tempfilename, outfilename)
self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
def generate_compdb(self):
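        # Ask ninja itself to dump the compilation database covering every
        # native and cross compile rule; failure only produces a warning.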
ninja_exe = environment.detect_ninja()
native_compilers = ['%s_COMPILER' % i for i in self.build.compilers]
cross_compilers = ['%s_CROSS_COMPILER' % i for i in self.build.cross_compilers]
ninja_compdb = [ninja_exe, '-t', 'compdb'] + native_compilers + cross_compilers
builddir = self.environment.get_build_dir()
try:
jsondb = subprocess.check_output(ninja_compdb, cwd=builddir)
with open(os.path.join(builddir, 'compile_commands.json'), 'wb') as f:
f.write(jsondb)
except Exception:
mlog.warning('Could not create compilation database.')
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
def get_generated_headers(self, target):
header_deps = []
# XXX: Why don't we add deps to CustomTarget headers here?
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue
for src in genlist.get_outputs():
if self.environment.is_header(src):
header_deps.append(self.get_target_generated_dir(target, genlist, src))
if 'vala' in target.compilers and not isinstance(target, build.Executable):
vala_header = File.from_built_file(self.get_target_dir(target), target.vala_header)
header_deps.append(vala_header)
# Recurse and find generated headers
for dep in target.link_targets:
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
return header_deps
def get_target_generated_sources(self, target):
"""
Returns a dictionary with the keys being the path to the file
(relative to the build directory) of that type and the value
being the GeneratorList or CustomTarget that generated it.
"""
srcs = OrderedDict()
for gensrc in target.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(target, gensrc, s)
srcs[f] = s
return srcs
def get_target_sources(self, target):
srcs = OrderedDict()
for s in target.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
raise InvalidArguments('All sources in target {!r} must be of type mesonlib.File'.format(s))
f = s.rel_to_builddir(self.build_to_src)
srcs[f] = s
return srcs
    # Languages that can mix with C or C++ but don't support unity builds yet
    # because the syntax we use for unity builds is specific to C/C++/ObjC/ObjC++.
# Assembly files cannot be unitified and neither can LLVM IR files
langs_cant_unity = ('d', 'fortran')
def get_target_source_can_unity(self, target, source):
if isinstance(source, File):
source = source.fname
if self.environment.is_llvm_ir(source) or \
self.environment.is_assembly(source):
return False
suffix = os.path.splitext(source)[1][1:]
for lang in self.langs_cant_unity:
if lang not in target.compilers:
continue
if suffix in target.compilers[lang].file_suffixes:
return False
return True
def generate_target(self, target, outfile):
if isinstance(target, build.CustomTarget):
self.generate_custom_target(target, outfile)
if isinstance(target, build.RunTarget):
self.generate_run_target(target, outfile)
name = target.get_id()
if name in self.processed_targets:
return
self.processed_targets[name] = True
# Generate rules for all dependency targets
self.process_target_dependencies(target, outfile)
# If target uses a language that cannot link to C objects,
# just generate for that language and return.
if isinstance(target, build.Jar):
self.generate_jar_target(target, outfile)
return
if 'rust' in target.compilers:
self.generate_rust_target(target, outfile)
return
if 'cs' in target.compilers:
self.generate_cs_target(target, outfile)
return
if 'swift' in target.compilers:
self.generate_swift_target(target, outfile)
return
# Now we handle the following languages:
# ObjC++, ObjC, C++, C, D, Fortran, Vala
# target_sources:
# Pre-existing target C/C++ sources to be built; dict of full path to
# source relative to build root and the original File object.
# generated_sources:
# GeneratedList and CustomTarget sources to be built; dict of the full
# path to source relative to build root and the generating target/list
# vala_generated_sources:
# Array of sources generated by valac that have to be compiled
if 'vala' in target.compilers:
# Sources consumed by valac are filtered out. These only contain
# C/C++ sources, objects, generated libs, and unknown sources now.
target_sources, generated_sources, \
vala_generated_sources = self.generate_vala_compile(target, outfile)
else:
target_sources = self.get_target_sources(target)
generated_sources = self.get_target_generated_sources(target)
vala_generated_sources = []
self.scan_fortran_module_outputs(target)
# Generate rules for GeneratedLists
self.generate_generator_list_rules(target, outfile)
# Generate rules for building the remaining source files in this target
outname = self.get_target_filename(target)
obj_list = []
use_pch = self.environment.coredata.base_options.get('b_pch', False)
is_unity = self.is_unity(target)
if use_pch and target.has_pch():
pch_objects = self.generate_pch(target, outfile)
else:
pch_objects = []
header_deps = []
unity_src = []
unity_deps = [] # Generated sources that must be built before compiling a Unity target.
header_deps += self.get_generated_headers(target)
if is_unity:
# Warn about incompatible sources if a unity build is enabled
langs = set(target.compilers.keys())
langs_cant = langs.intersection(self.langs_cant_unity)
if langs_cant:
                langs_cant_str = ', '.join(langs_cant).upper()
                langs_are = langs_cant_str + (' are' if len(langs_cant) > 1 else ' is')
                msg = '{} not supported in Unity builds yet, so {} ' \
                      'sources in the {!r} target will be compiled normally' \
                      ''.format(langs_are, langs_cant_str, target.name)
mlog.log(mlog.red('FIXME'), msg)
# Get a list of all generated headers that will be needed while building
# this target's sources (generated sources and pre-existing sources).
# This will be set as dependencies of all the target's sources. At the
# same time, also deal with generated sources that need to be compiled.
generated_source_files = []
for rel_src, gensrc in generated_sources.items():
dirpart, fnamepart = os.path.split(rel_src)
raw_src = File(True, dirpart, fnamepart)
if self.environment.is_source(rel_src) and not self.environment.is_header(rel_src):
if is_unity and self.get_target_source_can_unity(target, rel_src):
unity_deps.append(raw_src)
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
unity_src.append(abs_src)
else:
generated_source_files.append(raw_src)
elif self.environment.is_object(rel_src):
obj_list.append(rel_src)
elif self.environment.is_library(rel_src):
pass
else:
# Assume anything not specifically a source file is a header. This is because
# people generate files with weird suffixes (.inc, .fh) that they then include
# in their source files.
header_deps.append(raw_src)
# These are the generated source files that need to be built for use by
# this target. We create the Ninja build file elements for this here
# because we need `header_deps` to be fully generated in the above loop.
for src in generated_source_files:
if self.environment.is_llvm_ir(src):
o = self.generate_llvm_ir_compile(target, outfile, src)
else:
o = self.generate_single_compile(target, outfile, src, True,
header_deps=header_deps)
obj_list.append(o)
# Generate compilation targets for C sources generated from Vala
# sources. This can be extended to other $LANG->C compilers later if
# necessary. This needs to be separate for at least Vala
vala_generated_source_files = []
for src in vala_generated_sources:
dirpart, fnamepart = os.path.split(src)
raw_src = File(True, dirpart, fnamepart)
if is_unity:
unity_src.append(os.path.join(self.environment.get_build_dir(), src))
header_deps.append(raw_src)
else:
                # Generated targets are ordered deps because they must exist
# before the sources compiling them are used. After the first
# compile we get precise dependency info from dep files.
# This should work in all cases. If it does not, then just
# move them from orderdeps to proper deps.
if self.environment.is_header(src):
header_deps.append(raw_src)
else:
# We gather all these and generate compile rules below
# after `header_deps` (above) is fully generated
vala_generated_source_files.append(raw_src)
for src in vala_generated_source_files:
# Passing 'vala' here signifies that we want the compile
# arguments to be specialized for C code generated by
# valac. For instance, no warnings should be emitted.
obj_list.append(self.generate_single_compile(target, outfile, src, 'vala', [], header_deps))
# Generate compile targets for all the pre-existing sources for this target
for f, src in target_sources.items():
if not self.environment.is_header(src):
if self.environment.is_llvm_ir(src):
obj_list.append(self.generate_llvm_ir_compile(target, outfile, src))
elif is_unity and self.get_target_source_can_unity(target, src):
abs_src = os.path.join(self.environment.get_build_dir(),
src.rel_to_builddir(self.build_to_src))
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
obj_list += self.flatten_object_list(target)
if is_unity:
for src in self.generate_unity_files(target, unity_src):
obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
linker = self.determine_linker(target)
elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
self.generate_shlib_aliases(target, self.get_target_dir(target))
elem.write(outfile)
def process_target_dependencies(self, target, outfile):
for t in target.get_dependencies():
tname = t.get_basename() + t.type_suffix()
if tname not in self.processed_targets:
self.generate_target(t, outfile)
def custom_target_generator_inputs(self, target, outfile):
for s in target.sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, build.GeneratedList):
self.generate_genlist_for_target(s, target, outfile)
def unwrap_dep_list(self, target):
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
# Add a dependency on all the outputs of this target
for output in i.get_outputs():
deps.append(os.path.join(self.get_target_dir(i), output))
return deps
def generate_custom_target(self, target, outfile):
self.custom_target_generator_inputs(target, outfile)
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
deps = self.unwrap_dep_list(target)
deps += self.get_custom_target_depend_files(target)
desc = 'Generating {0} with a {1} command.'
if target.build_always:
deps.append('PHONY')
if target.depfile is None:
rulename = 'CUSTOM_COMMAND'
else:
rulename = 'CUSTOM_COMMAND_DEP'
elem = NinjaBuildElement(self.all_outputs, ofilenames, rulename, srcs)
elem.add_dep(deps)
for d in target.extra_depends:
# Add a dependency on all the outputs of this target
for output in d.get_outputs():
elem.add_dep(os.path.join(self.get_target_dir(d), output))
# If the target requires capturing stdout, then use the serialized
# executable wrapper to capture that output and save it to a file.
#
# If the command line requires a newline, also use the wrapper, as
# ninja does not support them in its build rule syntax.
#
# Windows doesn't have -rpath, so for EXEs that need DLLs built within
# the project, we need to set PATH so the DLLs are found. We use
# a serialized executable wrapper for that and check if the
# CustomTarget command needs extra paths first.
if (target.capture or any('\n' in c for c in cmd) or
((mesonlib.is_windows() or mesonlib.is_cygwin()) and
self.determine_windows_extra_paths(target.command[0]))):
exe_data = self.serialize_executable(target.command[0], cmd[1:],
# All targets are built from the build dir
self.environment.get_build_dir(),
capture=ofilenames[0] if target.capture else None)
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'exe', exe_data]
cmd_type = 'meson_exe.py custom'
else:
cmd_type = 'custom'
if target.depfile is not None:
rel_dfile = os.path.join(self.get_target_dir(target), target.depfile)
abs_pdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
os.makedirs(abs_pdir, exist_ok=True)
elem.add_item('DEPFILE', rel_dfile)
elem.add_item('COMMAND', cmd)
elem.add_item('description', desc.format(target.name, cmd_type))
elem.write(outfile)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_run_target(self, target, outfile):
cmd = [sys.executable, self.environment.get_build_command(), '--internal', 'commandrunner']
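        # Run targets are executed through meson's internal 'commandrunner'
        # helper, which receives the source dir, build dir, target subdir and
        # mesonintrospect path (appended below) ahead of the actual command.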
deps = self.unwrap_dep_list(target)
arg_strings = []
for i in target.args:
if isinstance(i, str):
arg_strings.append(i)
elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
relfname = self.get_target_filename(i)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
deps.append(relfname)
elif isinstance(i, mesonlib.File):
relfname = i.rel_to_builddir(self.build_to_src)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
else:
raise AssertionError('Unreachable code in generate_run_target: ' + str(i))
elem = NinjaBuildElement(self.all_outputs, target.name, 'CUSTOM_COMMAND', [])
cmd += [self.environment.get_source_dir(),
self.environment.get_build_dir(),
target.subdir,
get_meson_script(self.environment, 'mesonintrospect')]
texe = target.command
try:
texe = texe.held_object
except AttributeError:
pass
if isinstance(texe, build.Executable):
abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
deps.append(self.get_target_filename(texe))
if self.environment.is_cross_build() and \
self.environment.cross_info.need_exe_wrapper():
exe_wrap = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
if exe_wrap is not None:
cmd += [exe_wrap]
cmd.append(abs_exe)
elif isinstance(texe, dependencies.ExternalProgram):
cmd += texe.get_command()
elif isinstance(texe, build.CustomTarget):
deps.append(self.get_target_filename(texe))
cmd += [os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))]
else:
cmd.append(target.command)
cmd += arg_strings
elem.add_dep(deps)
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Running external command %s.' % target.name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_coverage_rules(self, outfile):
e = NinjaBuildElement(self.all_outputs, 'coverage', 'CUSTOM_COMMAND', 'PHONY')
e.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'coverage',
self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_log_dir()])
e.add_item('description', 'Generates coverage reports.')
e.write(outfile)
self.generate_coverage_legacy_rules(outfile)
def generate_coverage_legacy_rules(self, outfile):
(gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
added_rule = False
if gcovr_exe:
added_rule = True
elem = NinjaBuildElement(self.all_outputs, 'coverage-xml', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_source_dir(),
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
elem.add_item('DESC', 'Generating XML coverage report.')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, 'coverage-text', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_source_dir(),
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
elem.add_item('DESC', 'Generating text coverage report.')
elem.write(outfile)
if lcov_exe and genhtml_exe:
added_rule = True
htmloutdir = os.path.join(self.environment.get_log_dir(), 'coveragereport')
covinfo = os.path.join(self.environment.get_log_dir(), 'coverage.info')
phony_elem = NinjaBuildElement(self.all_outputs, 'coverage-html', 'phony', os.path.join(htmloutdir, 'index.html'))
phony_elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, os.path.join(htmloutdir, 'index.html'), 'CUSTOM_COMMAND', '')
command = [lcov_exe, '--directory', self.environment.get_build_dir(),
'--capture', '--output-file', covinfo, '--no-checksum',
'&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),
'--output-directory', htmloutdir, '--title', 'Code coverage',
'--legend', '--show-details', covinfo]
elem.add_item('COMMAND', command)
elem.add_item('DESC', 'Generating HTML coverage report.')
elem.write(outfile)
if not added_rule:
mlog.warning('coverage requested but neither gcovr nor lcov/genhtml found.')
def generate_install(self, outfile):
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
if self.environment.is_cross_build():
bins = self.environment.cross_info.config['binaries']
if 'strip' not in bins:
mlog.warning('Cross file does not specify strip binary, result will not be stripped.')
strip_bin = None
else:
strip_bin = mesonlib.stringlistify(bins['strip'])
else:
strip_bin = self.environment.native_strip_bin
d = InstallData(self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_prefix(),
strip_bin,
get_meson_script(self.environment, 'mesonintrospect'))
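        # The InstallData object is pickled to install.dat below and replayed
        # at install time by the '--internal install' COMMAND added here.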
elem = NinjaBuildElement(self.all_outputs, 'install', 'CUSTOM_COMMAND', 'PHONY')
elem.add_dep('all')
elem.add_item('DESC', 'Installing files.')
elem.add_item('COMMAND', [sys.executable, self.environment.get_build_command(), '--internal', 'install', install_data_file])
elem.add_item('pool', 'console')
self.generate_depmf_install(d)
self.generate_target_install(d)
self.generate_header_install(d)
self.generate_man_install(d)
self.generate_data_install(d)
self.generate_custom_install_script(d)
self.generate_subdir_install(d)
elem.write(outfile)
with open(install_data_file, 'wb') as ofile:
pickle.dump(d, ofile)
def generate_target_install(self, d):
for t in self.build.get_targets().values():
if not t.should_install():
continue
# Find the installation directory.
outdirs = t.get_custom_install_dir()
custom_install_dir = False
if outdirs[0] is not None and outdirs[0] is not True:
# Either the value is set, or is set to False which means
# we want this specific output out of many outputs to not
# be installed.
custom_install_dir = True
elif isinstance(t, build.SharedModule):
outdirs[0] = self.environment.get_shared_module_dir()
elif isinstance(t, build.SharedLibrary):
outdirs[0] = self.environment.get_shared_lib_dir()
elif isinstance(t, build.StaticLibrary):
outdirs[0] = self.environment.get_static_lib_dir()
elif isinstance(t, build.Executable):
outdirs[0] = self.environment.get_bindir()
else:
assert(isinstance(t, build.BuildTarget))
# XXX: Add BuildTarget-specific install dir cases here
outdirs[0] = self.environment.get_libdir()
# Sanity-check the outputs and install_dirs
num_outdirs, num_out = len(outdirs), len(t.get_outputs())
if num_outdirs != 1 and num_outdirs != num_out:
m = 'Target {!r} has {} outputs: {!r}, but only {} "install_dir"s were found.\n' \
"Pass 'false' for outputs that should not be installed and 'true' for\n" \
'using the default installation directory for an output.'
raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))
# Install the target output(s)
if isinstance(t, build.BuildTarget):
should_strip = self.get_option_for_target('strip', t)
# Install primary build output (library/executable/jar, etc)
# Done separately because of strip/aliases/rpath
if outdirs[0] is not False:
i = [self.get_target_filename(t), outdirs[0],
t.get_aliases(), should_strip, t.install_rpath]
d.targets.append(i)
# On toolchains/platforms that use an import library for
# linking (separate from the shared library with all the
# code), we need to install that too (dll.a/.lib).
if isinstance(t, build.SharedLibrary) and t.get_import_filename():
if custom_install_dir:
# If the DLL is installed into a custom directory,
# install the import library into the same place so
# it doesn't go into a surprising place
implib_install_dir = outdirs[0]
else:
implib_install_dir = self.environment.get_import_lib_dir()
# Install the import library.
i = [self.get_target_filename_for_linking(t),
implib_install_dir,
# It has no aliases, should not be stripped, and
# doesn't have an install_rpath
{}, False, '']
d.targets.append(i)
# Install secondary outputs. Only used for Vala right now.
if num_outdirs > 1:
for output, outdir in zip(t.get_outputs()[1:], outdirs[1:]):
# User requested that we not install this output
if outdir is False:
continue
f = os.path.join(self.get_target_dir(t), output)
d.targets.append([f, outdir, {}, False, None])
elif isinstance(t, build.CustomTarget):
# If only one install_dir is specified, assume that all
# outputs will be installed into it. This is for
# backwards-compatibility and because it makes sense to
# avoid repetition since this is a common use-case.
#
# To selectively install only some outputs, pass `false` as
# the install_dir for the corresponding output by index
if num_outdirs == 1 and num_out > 1:
for output in t.get_outputs():
f = os.path.join(self.get_target_dir(t), output)
d.targets.append([f, outdirs[0], {}, False, None])
else:
for output, outdir in zip(t.get_outputs(), outdirs):
# User requested that we not install this output
if outdir is False:
continue
f = os.path.join(self.get_target_dir(t), output)
d.targets.append([f, outdir, {}, False, None])
def generate_custom_install_script(self, d):
result = []
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for i in self.build.install_scripts:
exe = i['exe']
args = i['args']
fixed_args = []
for a in args:
a = a.replace('@SOURCE_ROOT@', srcdir)
a = a.replace('@BUILD_ROOT@', builddir)
fixed_args.append(a)
result.append(build.RunScript(exe, fixed_args))
d.install_scripts = result
def generate_header_install(self, d):
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
if not isinstance(f, File):
msg = 'Invalid header type {!r} can\'t be installed'
raise MesonException(msg.format(f))
abspath = f.absolute_path(srcdir, builddir)
i = [abspath, outdir]
d.headers.append(i)
def generate_man_install(self, d):
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
subdir = os.path.join(manroot, 'man' + num)
srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
dstabs = os.path.join(subdir, os.path.split(f)[1] + '.gz')
i = [srcabs, dstabs]
d.man.append(i)
def generate_data_install(self, d):
data = self.build.get_data()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for de in data:
assert(isinstance(de, build.Data))
subdir = de.install_dir
for f in de.sources:
assert(isinstance(f, mesonlib.File))
plain_f = os.path.split(f.fname)[1]
dstabs = os.path.join(subdir, plain_f)
i = [f.absolute_path(srcdir, builddir), dstabs, de.install_mode]
d.data.append(i)
def generate_subdir_install(self, d):
for sd in self.build.get_install_subdirs():
inst_subdir = sd.installable_subdir.rstrip('/')
idir_parts = inst_subdir.split('/')
if len(idir_parts) > 1:
subdir = os.path.join(sd.source_subdir, '/'.join(idir_parts[:-1]))
inst_dir = idir_parts[-1]
else:
subdir = sd.source_subdir
inst_dir = sd.installable_subdir
src_dir = os.path.join(self.environment.get_source_dir(), subdir)
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, inst_dir, dst_dir, sd.install_mode])
def generate_tests(self, outfile):
self.serialize_tests()
test_exe = get_meson_script(self.environment, 'mesontest')
cmd = [sys.executable, '-u', test_exe, '--no-rebuild']
if not self.environment.coredata.get_builtin_option('stdsplit'):
cmd += ['--no-stdsplit']
if self.environment.coredata.get_builtin_option('errorlogs'):
cmd += ['--print-errorlogs']
elem = NinjaBuildElement(self.all_outputs, 'test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
# And then benchmarks.
cmd = [sys.executable, '-u', test_exe, '--benchmark', '--logbase',
'benchmarklog', '--num-processes=1', '--no-rebuild']
elem = NinjaBuildElement(self.all_outputs, 'benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
elem.write(outfile)
def generate_rules(self, outfile):
outfile.write('# Rules for compiling.\n\n')
self.generate_compile_rules(outfile)
outfile.write('# Rules for linking.\n\n')
if self.environment.is_cross_build():
self.generate_static_link_rules(True, outfile)
self.generate_static_link_rules(False, outfile)
self.generate_dynamic_link_rules(outfile)
outfile.write('# Other rules\n\n')
outfile.write('rule CUSTOM_COMMAND\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' restat = 1\n\n')
# Ninja errors out if you have deps = gcc but no depfile, so we must
# have two rules for custom commands.
outfile.write('rule CUSTOM_COMMAND_DEP\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' deps = gcc\n')
outfile.write(' depfile = $DEPFILE\n')
outfile.write(' restat = 1\n\n')
outfile.write('rule REGENERATE_BUILD\n')
c = (quote_char + ninja_quote(sys.executable) + quote_char,
quote_char + ninja_quote(self.environment.get_build_command()) + quote_char,
'--internal',
'regenerate',
quote_char + ninja_quote(self.environment.get_source_dir()) + quote_char,
quote_char + ninja_quote(self.environment.get_build_dir()) + quote_char)
outfile.write(" command = %s %s %s %s %s %s --backend ninja\n" % c)
outfile.write(' description = Regenerating build files.\n')
outfile.write(' generator = 1\n\n')
outfile.write('\n')
def generate_phony(self, outfile):
outfile.write('# Phony build target, always out of date\n')
outfile.write('build PHONY: phony\n')
outfile.write('\n')
def generate_jar_target(self, target, outfile):
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []
compiler = target.compilers['java']
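        # Flags for jar(1): 'c' = create an archive, 'e' = record an entry
        # point (only when a main class is set), 'f' = the archive file name
        # follows. 'm' (manifest) is currently never used.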
c = 'c'
m = ''
e = ''
f = 'f'
main_class = target.get_main_class()
if main_class != '':
e = 'e'
for src in src_list:
plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
class_list.append(plain_class_path)
class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
jar_rule = 'java_LINKER'
commands = [c + m + e + f]
if e != '':
commands.append(main_class)
commands.append(self.get_target_filename(target))
# Java compilation can produce an arbitrary number of output
# class files for a single source file. Thus tell jar to just
# grab everything in the final package.
commands += ['-C', self.get_target_private_dir(target), '.']
elem = NinjaBuildElement(self.all_outputs, outname_rel, jar_rule, [])
elem.add_dep(class_dep_list)
elem.add_item('ARGS', commands)
elem.write(outfile)
def generate_cs_resource_tasks(self, target, outfile):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(self.all_outputs, ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
elem.write(outfile)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments('Unknown resource file %s.' % r)
args.append(a)
return args, deps
def generate_cs_target(self, target, outfile):
buildtype = self.get_option_for_target('buildtype', target)
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
compiler = target.compilers['cs']
rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
deps = []
commands = target.extra_args.get('cs', [])
commands += compiler.get_buildtype_args(buildtype)
if isinstance(target, build.Executable):
commands.append('-target:exe')
elif isinstance(target, build.SharedLibrary):
commands.append('-target:library')
else:
raise MesonException('Unknown C# target type.')
(resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
commands += resource_args
deps += resource_deps
commands += compiler.get_output_args(outname_rel)
for l in target.link_targets:
lname = os.path.join(self.get_target_dir(l), l.get_filename())
commands += compiler.get_link_args(lname)
deps.append(lname)
if '-g' in commands:
outputs = [outname_rel, outname_rel + '.mdb']
else:
outputs = [outname_rel]
elem = NinjaBuildElement(self.all_outputs, outputs, 'cs_COMPILER', rel_srcs)
elem.add_dep(deps)
elem.add_item('ARGS', commands)
elem.write(outfile)
def generate_single_java_compile(self, src, target, compiler, outfile):
args = []
args += compiler.get_buildtype_args(self.get_option_for_target('buildtype', target))
args += self.build.get_global_args(compiler)
args += self.build.get_project_args(compiler, target.subproject)
args += target.get_java_args()
args += compiler.get_output_args(self.get_target_private_dir(target))
for i in target.include_dirs:
for idir in i.get_incdirs():
args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler.get_language() + '_COMPILER', rel_src)
element.add_item('ARGS', args)
element.write(outfile)
return plain_class_path
def generate_java_link(self, outfile):
rule = 'rule java_LINKER\n'
command = ' command = jar $ARGS\n'
description = ' description = Creating JAR $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def determine_dep_vapis(self, target):
"""
Peek into the sources of BuildTargets we're linking with, and if any of
them was built with Vala, assume that it also generated a .vapi file of
the same name as the BuildTarget and return the path to it relative to
the build directory.
"""
result = OrderedSet()
for dep in target.link_targets:
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.endswith('vala'):
vapiname = dep.name + '.vapi'
fullname = os.path.join(self.get_target_dir(dep), vapiname)
result.add(fullname)
break
return list(result)
def split_vala_sources(self, t):
"""
Splits the target's sources into .vala, .vapi, and other sources.
Handles both pre-existing and generated sources.
Returns a tuple (vala, vapi, others) each of which is a dictionary with
the keys being the path to the file (relative to the build directory)
and the value being the object that generated or represents the file.
"""
vala = OrderedDict()
vapi = OrderedDict()
others = OrderedDict()
othersgen = OrderedDict()
# Split pre-existing sources
for s in t.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
msg = 'All sources in target {!r} must be of type ' \
'mesonlib.File, not {!r}'.format(t, s)
raise InvalidArguments(msg)
f = s.rel_to_builddir(self.build_to_src)
if s.endswith('.vala'):
srctype = vala
elif s.endswith('.vapi'):
srctype = vapi
else:
srctype = others
srctype[f] = s
# Split generated sources
for gensrc in t.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(t, gensrc, s)
if s.endswith('.vala'):
srctype = vala
elif s.endswith('.vapi'):
srctype = vapi
# Generated non-Vala (C/C++) sources. Won't be used for
# generating the Vala compile rule below.
else:
srctype = othersgen
# Duplicate outputs are disastrous
if f in srctype and srctype[f] is not gensrc:
msg = 'Duplicate output {0!r} from {1!r} {2!r}; ' \
'conflicts with {0!r} from {4!r} {3!r}' \
''.format(f, type(gensrc).__name__, gensrc.name,
srctype[f].name, type(srctype[f]).__name__)
raise InvalidArguments(msg)
# Store 'somefile.vala': GeneratedList (or CustomTarget)
srctype[f] = gensrc
return vala, vapi, (others, othersgen)
def generate_vala_compile(self, target, outfile):
"""Vala is compiled into C. Set up all necessary build steps here."""
(vala_src, vapi_src, other_src) = self.split_vala_sources(target)
extra_dep_files = []
if not vala_src:
msg = 'Vala library {!r} has no Vala source files.'
raise InvalidArguments(msg.format(target.name))
valac = target.compilers['vala']
c_out_dir = self.get_target_private_dir(target)
# C files generated by valac
vala_c_src = []
# Files generated by valac
valac_outputs = []
# All sources that are passed to valac on the commandline
all_files = list(vapi_src.keys())
for (vala_file, gensrc) in vala_src.items():
all_files.append(vala_file)
# Figure out where the Vala compiler will write the compiled C file
# If the Vala file is in a subdir of the build dir (in our case
# because it was generated/built by something else), the subdir path
# components will be preserved in the output path. But if the Vala
# file is outside the build directory, the path components will be
# stripped and just the basename will be used.
if isinstance(gensrc, (build.CustomTarget, build.GeneratedList)) or gensrc.is_built:
vala_c_file = os.path.splitext(vala_file)[0] + '.c'
else:
vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'
# All this will be placed inside the c_out_dir
vala_c_file = os.path.join(c_out_dir, vala_c_file)
vala_c_src.append(vala_c_file)
valac_outputs.append(vala_c_file)
args = self.generate_basic_compiler_args(target, valac)
# Tell Valac to output everything in our private directory. Sadly this
# means it will also preserve the directory components of Vala sources
# found inside the build tree (generated sources).
args += ['-d', c_out_dir]
if not isinstance(target, build.Executable):
# Library name
args += ['--library=' + target.name]
# Outputted header
hname = os.path.join(self.get_target_dir(target), target.vala_header)
args += ['-H', hname, '--use-header']
valac_outputs.append(hname)
# Outputted vapi file
vapiname = os.path.join(self.get_target_dir(target), target.vala_vapi)
# Force valac to write the vapi and gir files in the target build dir.
# Without this, it will write it inside c_out_dir
args += ['--vapi', os.path.join('..', target.vala_vapi)]
valac_outputs.append(vapiname)
target.outputs += [target.vala_header, target.vala_vapi]
# Install header and vapi to default locations if user requests this
if len(target.install_dir) > 1 and target.install_dir[1] is True:
target.install_dir[1] = self.environment.get_includedir()
if len(target.install_dir) > 2 and target.install_dir[2] is True:
target.install_dir[2] = os.path.join(self.environment.get_datadir(), 'vala', 'vapi')
# Generate GIR if requested
if isinstance(target.vala_gir, str):
girname = os.path.join(self.get_target_dir(target), target.vala_gir)
args += ['--gir', os.path.join('..', target.vala_gir)]
valac_outputs.append(girname)
target.outputs.append(target.vala_gir)
# Install GIR to default location if requested by user
if len(target.install_dir) > 3 and target.install_dir[3] is True:
target.install_dir[3] = os.path.join(self.environment.get_datadir(), 'gir-1.0')
# Detect gresources and add --gresources arguments for each
for (gres, gensrc) in other_src[1].items():
if isinstance(gensrc, modules.GResourceTarget):
gres_xml, = self.get_custom_target_sources(gensrc)
args += ['--gresources=' + gres_xml]
extra_args = []
for a in target.extra_args.get('vala', []):
if isinstance(a, File):
relname = a.rel_to_builddir(self.build_to_src)
extra_dep_files.append(relname)
extra_args.append(relname)
else:
extra_args.append(a)
dependency_vapis = self.determine_dep_vapis(target)
extra_dep_files += dependency_vapis
args += extra_args
element = NinjaBuildElement(self.all_outputs, valac_outputs,
valac.get_language() + '_COMPILER',
all_files + dependency_vapis)
element.add_item('ARGS', args)
element.add_dep(extra_dep_files)
element.write(outfile)
return other_src[0], other_src[1], vala_c_src
def generate_rust_target(self, target, outfile):
rustc = target.compilers['rust']
relsrc = []
for i in target.get_sources():
if not rustc.can_compile(i):
raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
relsrc.append(i.rel_to_builddir(self.build_to_src))
target_name = os.path.join(target.subdir, target.get_filename())
args = ['--crate-type']
if isinstance(target, build.Executable):
cratetype = 'bin'
elif isinstance(target, build.SharedLibrary):
cratetype = 'rlib'
elif isinstance(target, build.StaticLibrary):
cratetype = 'rlib'
else:
raise InvalidArguments('Unknown target type for rustc.')
args.append(cratetype)
args += rustc.get_buildtype_args(self.get_option_for_target('buildtype', target))
depfile = os.path.join(target.subdir, target.name + '.d')
args += ['--emit', 'dep-info={}'.format(depfile), '--emit', 'link']
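        # '--emit dep-info' makes rustc write a Makefile-style depfile, which
        # the rust_COMPILER rule feeds back to Ninja via 'targetdep' below.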
args += target.get_extra_args('rust')
args += ['-o', os.path.join(target.subdir, target.get_filename())]
orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
linkdirs = OrderedDict()
for d in target.link_targets:
linkdirs[d.subdir] = True
for d in linkdirs.keys():
if d == '':
d = '.'
args += ['-L', d]
element = NinjaBuildElement(self.all_outputs, target_name, 'rust_COMPILER', relsrc)
if len(orderdeps) > 0:
element.add_orderdep(orderdeps)
element.add_item('ARGS', args)
element.add_item('targetdep', depfile)
element.add_item('cratetype', cratetype)
element.write(outfile)
def swift_module_file_name(self, target):
return os.path.join(self.get_target_private_dir(target),
self.target_swift_modulename(target) + '.swiftmodule')
def target_swift_modulename(self, target):
return target.name
def is_swift_target(self, target):
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_modules(self, target):
result = []
for l in target.link_targets:
if self.is_swift_target(l):
result.append(self.swift_module_file_name(l))
return result
def determine_swift_dep_dirs(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_swift_link_deps(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_filename(l))
return result
def split_swift_generated_sources(self, target):
all_srcs = self.get_target_generated_sources(target)
srcs = []
others = []
for i in all_srcs:
if i.endswith('.swift'):
srcs.append(i)
else:
others.append(i)
return srcs, others
def generate_swift_target(self, target, outfile):
module_name = self.target_swift_modulename(target)
swiftc = target.compilers['swift']
abssrc = []
abs_headers = []
header_imports = []
for i in target.get_sources():
if swiftc.can_compile(i):
relsrc = i.rel_to_builddir(self.build_to_src)
abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
abssrc.append(abss)
elif self.environment.is_header(i):
relh = i.rel_to_builddir(self.build_to_src)
absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
abs_headers.append(absh)
header_imports += swiftc.get_header_import_args(absh)
else:
raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
compile_args = swiftc.get_compile_only_args()
compile_args += swiftc.get_module_args(module_name)
link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
rundir = self.get_target_private_dir(target)
out_module_name = self.swift_module_file_name(target)
in_module_files = self.determine_swift_dep_modules(target)
abs_module_dirs = self.determine_swift_dep_dirs(target)
module_includes = []
for x in abs_module_dirs:
module_includes += swiftc.get_include_args(x)
link_deps = self.get_swift_link_deps(target)
abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
(rel_generated, _) = self.split_swift_generated_sources(target)
abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
# We need absolute paths because swiftc needs to be invoked in a subdir
        # and this is the easiest way to go about it.
objects = [] # Relative to swift invocation dir
rel_objects = [] # Relative to build.ninja
for i in abssrc + abs_generated:
base = os.path.split(i)[1]
oname = os.path.splitext(base)[0] + '.o'
objects.append(oname)
rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
# Swiftc does not seem to be able to emit objects and module files in one go.
elem = NinjaBuildElement(self.all_outputs, rel_objects,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_dep(abs_headers)
elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, out_module_name,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
if isinstance(target, build.StaticLibrary):
elem = self.generate_link(target, outfile, self.get_target_filename(target),
rel_objects, self.build.static_linker)
elem.write(outfile)
elif isinstance(target, build.Executable):
elem = NinjaBuildElement(self.all_outputs, self.get_target_filename(target), 'swift_COMPILER', [])
elem.add_dep(rel_objects)
elem.add_dep(link_deps)
elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
else:
raise MesonException('Swift supports only executable and static library targets.')
def generate_static_link_rules(self, is_cross, outfile):
if 'java' in self.build.compilers:
if not is_cross:
self.generate_java_link(outfile)
if is_cross:
if self.environment.cross_info.need_cross_compiler():
static_linker = self.build.static_cross_linker
else:
static_linker = self.build.static_linker
crstr = '_CROSS'
else:
static_linker = self.build.static_linker
crstr = ''
if static_linker is None:
return
rule = 'rule STATIC%s_LINKER\n' % crstr
# We don't use @file.rsp on Windows with ArLinker because llvm-ar and
# gcc-ar blindly pass the --plugin argument to `ar` and you cannot pass
# options as arguments while using the @file.rsp syntax.
# See: https://github.com/mesonbuild/meson/issues/1646
if mesonlib.is_windows() and not isinstance(static_linker, compilers.ArLinker):
command_template = ''' command = {executable} @$out.rsp
rspfile = $out.rsp
rspfile_content = $LINK_ARGS {output_args} $in
'''
else:
command_template = ' command = {executable} $LINK_ARGS {output_args} $in\n'
cmdlist = []
# FIXME: Must normalize file names with pathlib.Path before writing
# them out to fix this properly on Windows. See:
# https://github.com/mesonbuild/meson/issues/1517
# https://github.com/mesonbuild/meson/issues/1526
if isinstance(static_linker, compilers.ArLinker) and not mesonlib.is_windows():
# `ar` has no options to overwrite archives. It always appends,
# which is never what we want. Delete an existing library first if
# it exists. https://github.com/mesonbuild/meson/issues/1355
cmdlist = [execute_wrapper, rmfile_prefix.format('$out')]
cmdlist += static_linker.get_exelist()
command = command_template.format(
executable=' '.join(cmdlist),
output_args=' '.join(static_linker.get_output_args('$out')))
description = ' description = Linking static target $out.\n\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
def generate_dynamic_link_rules(self, outfile):
ctypes = [(self.build.compilers, False)]
if self.environment.is_cross_build():
if self.environment.cross_info.need_cross_compiler():
ctypes.append((self.build.cross_compilers, True))
else:
# Native compiler masquerades as the cross compiler.
ctypes.append((self.build.compilers, True))
else:
ctypes.append((self.build.cross_compilers, True))
for (complist, is_cross) in ctypes:
for langname, compiler in complist.items():
                if langname in ('java', 'vala', 'rust', 'cs'):
                    continue
crstr = ''
cross_args = []
if is_cross:
crstr = '_CROSS'
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
except KeyError:
pass
rule = 'rule %s%s_LINKER\n' % (langname, crstr)
if mesonlib.is_windows():
command_template = ''' command = {executable} @$out.rsp
rspfile = $out.rsp
rspfile_content = $ARGS {output_args} $in $LINK_ARGS {cross_args} $aliasing
'''
else:
command_template = ' command = {executable} $ARGS {output_args} $in $LINK_ARGS {cross_args} $aliasing\n'
command = command_template.format(
executable=' '.join(compiler.get_linker_exelist()),
cross_args=' '.join(cross_args),
output_args=' '.join(compiler.get_linker_output_args('$out'))
)
description = ' description = Linking target $out.'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
outfile.write('\n')
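        # SHSYM extracts a shared library's exported-symbol list; with
        # restat = 1, dependents are only relinked when that list changes.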
symrule = 'rule SHSYM\n'
symcmd = ' command = "%s" "%s" %s %s %s %s $CROSS\n' % (ninja_quote(sys.executable),
self.environment.get_build_command(),
'--internal',
'symbolextractor',
'$in',
'$out')
synstat = ' restat = 1\n'
syndesc = ' description = Generating symbol file $out.\n'
outfile.write(symrule)
outfile.write(symcmd)
outfile.write(synstat)
outfile.write(syndesc)
outfile.write('\n')
def generate_java_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Java object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_cs_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling C Sharp target $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_vala_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Vala source $in.\n'
restat = ' restat = 1\n' # ValaC does this always to take advantage of it.
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(restat)
outfile.write('\n')
def generate_rust_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Rust source $in.\n'
depfile = ' depfile = $targetdep\n'
depstyle = ' deps = gcc\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(depfile)
outfile.write(depstyle)
outfile.write('\n')
def generate_swift_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
full_exe = [ninja_quote(sys.executable),
ninja_quote(self.environment.get_build_command()),
'--internal',
'dirchanger',
'$RUNDIR']
invoc = (' '.join(full_exe) + ' ' +
' '.join(ninja_quote(i) for i in compiler.get_exelist()))
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Swift source $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_fortran_dep_hack(self, outfile):
if mesonlib.is_windows():
cmd = 'cmd /C ""'
else:
cmd = 'true'
template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
command = %s
description = Dep hack
restat = 1
'''
outfile.write(template % cmd)
def generate_llvm_ir_compile_rule(self, compiler, is_cross, outfile):
if getattr(self, 'created_llvm_ir_rule', False):
return
rule = 'rule llvm_ir{}_COMPILER\n'.format('_CROSS' if is_cross else '')
if mesonlib.is_windows():
command_template = ' command = {executable} @$out.rsp\n' \
' rspfile = $out.rsp\n' \
' rspfile_content = {cross_args} $ARGS {output_args} {compile_only_args} $in\n'
else:
command_template = ' command = {executable} {cross_args} $ARGS {output_args} {compile_only_args} $in\n'
command = command_template.format(
executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),
cross_args=' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),
output_args=' '.join(compiler.get_output_args('$out')),
compile_only_args=' '.join(compiler.get_compile_only_args())
)
description = ' description = Compiling LLVM IR object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
self.created_llvm_ir_rule = True
def get_cross_info_lang_args(self, lang, is_cross):
if is_cross:
try:
return self.environment.cross_info.config['properties'][lang + '_args']
except KeyError:
pass
return []
def generate_compile_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname == 'java':
if not is_cross:
self.generate_java_compile_rule(compiler, outfile)
return
if langname == 'cs':
if not is_cross:
self.generate_cs_compile_rule(compiler, outfile)
return
if langname == 'vala':
if not is_cross:
self.generate_vala_compile_rules(compiler, outfile)
return
if langname == 'rust':
if not is_cross:
self.generate_rust_compile_rules(compiler, outfile)
return
if langname == 'swift':
if not is_cross:
self.generate_swift_compile_rules(compiler, outfile)
return
if langname == 'fortran':
self.generate_fortran_dep_hack(outfile)
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
cross_args = self.get_cross_info_lang_args(langname, is_cross)
if mesonlib.is_windows():
command_template = ''' command = {executable} @$out.rsp
rspfile = $out.rsp
rspfile_content = {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in
'''
else:
command_template = ' command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n'
command = command_template.format(
executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),
cross_args=' '.join(cross_args),
dep_args=' '.join(quoted_depargs),
output_args=' '.join(compiler.get_output_args('$out')),
compile_only_args=' '.join(compiler.get_compile_only_args())
)
description = ' description = Compiling %s object $out.\n' % langname.title()
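        # 'deps = gcc' tells Ninja to read the Makefile-style depfile named by
        # $DEPFILE, while 'deps = msvc' parses /showIncludes output using the
        # msvc_deps_prefix written out by detect_vs_dep_prefix().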
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_pch_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname != 'c' and langname != 'cpp':
return
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_PCH\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
if compiler.get_id() == 'msvc':
output = ''
else:
output = ' '.join(compiler.get_output_args('$out'))
command = " command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n".format(
executable=' '.join(compiler.get_exelist()),
cross_args=' '.join(cross_args),
dep_args=' '.join(quoted_depargs),
output_args=output,
compile_only_args=' '.join(compiler.get_compile_only_args())
)
description = ' description = Precompiling header %s.\n' % '$in'
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_compile_rules(self, outfile):
qstr = quote_char + "%s" + quote_char
for langname, compiler in self.build.compilers.items():
if compiler.get_id() == 'clang':
self.generate_llvm_ir_compile_rule(compiler, False, outfile)
self.generate_compile_rule_for(langname, compiler, qstr, False, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, False, outfile)
if self.environment.is_cross_build():
# In case we are going a target-only build, make the native compilers
# masquerade as cross compilers.
if self.environment.cross_info.need_cross_compiler():
cclist = self.build.cross_compilers
else:
cclist = self.build.compilers
for langname, compiler in cclist.items():
if compiler.get_id() == 'clang':
self.generate_llvm_ir_compile_rule(compiler, True, outfile)
self.generate_compile_rule_for(langname, compiler, qstr, True, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, True, outfile)
outfile.write('\n')
def generate_generator_list_rules(self, target, outfile):
# CustomTargets have already written their rules,
# so write rules for GeneratedLists here
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue
self.generate_genlist_for_target(genlist, target, outfile)
def generate_genlist_for_target(self, genlist, target, outfile):
generator = genlist.get_generator()
exe = generator.get_exe()
exe_arr = self.exe_object_to_cmd_array(exe)
infilelist = genlist.get_inputs()
outfilelist = genlist.get_outputs()
base_args = generator.get_arglist()
extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
source_target_dir = self.get_target_source_dir(target)
for i in range(len(infilelist)):
if len(generator.outputs) == 1:
sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
else:
sole_output = ''
curfile = infilelist[i]
infilename = curfile.rel_to_builddir(self.build_to_src)
outfiles = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
if generator.depfile is None:
rulename = 'CUSTOM_COMMAND'
args = base_args
else:
rulename = 'CUSTOM_COMMAND_DEP'
depfilename = generator.get_dep_outname(infilename)
depfile = os.path.join(self.get_target_private_dir(target), depfilename)
args = [x.replace('@DEPFILE@', depfile) for x in base_args]
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)
for x in args]
args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
# We have consumed output files, so drop them from the list of remaining outputs.
if sole_output == '':
outfilelist = outfilelist[len(generator.outputs):]
relout = self.get_target_private_dir(target)
args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
for x in args]
args = [x.replace("@CURRENT_SOURCE_DIR@", source_target_dir) for x in args]
args = [x.replace("@SOURCE_ROOT@", self.build_to_src).replace("@BUILD_ROOT@", '.')
for x in args]
cmdlist = exe_arr + self.replace_extra_args(args, genlist)
elem = NinjaBuildElement(self.all_outputs, outfiles, rulename, infilename)
if generator.depfile is not None:
elem.add_item('DEPFILE', depfile)
if len(extra_dependencies) > 0:
elem.add_dep(extra_dependencies)
elem.add_item('DESC', 'Generating $out')
if isinstance(exe, build.BuildTarget):
elem.add_dep(self.get_target_filename(exe))
elem.add_item('COMMAND', cmdlist)
elem.write(outfile)
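# Illustrative sketch (assumption about intent, not Meson API): the argument
# rewriting above amounts to plain token substitution over each generator argument.
def _example_substitute(args, mapping):
    out = []
    for a in args:
        for token, value in mapping.items():
            a = a.replace(token, value)
        out.append(a)
    return out
# _example_substitute(['--input=@INPUT@', '--depfile=@DEPFILE@'],
#                     {'@INPUT@': 'proto/foo.proto', '@DEPFILE@': 'priv/foo.d'})
# -> ['--input=proto/foo.proto', '--depfile=priv/foo.d']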
def scan_fortran_module_outputs(self, target):
compiler = None
for lang, c in self.build.compilers.items():
if lang == 'fortran':
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for Fortran sources generated by
# custom_target() and generator() as those are run after
# the configuration (configure_file() is OK)
if not compiler.can_compile(s):
continue
filename = s.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
with open(filename) as f:
for line in f:
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == 'procedure':
# MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments(
'Namespace collision: module %s defined in '
'two files %s and %s.' %
(modname, module_files[modname], s))
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
def get_fortran_deps(self, compiler, src, target):
mod_files = []
usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
dirname = self.get_target_private_dir(target)
tdeps = self.fortran_deps[target.get_basename()]
with open(src) as f:
for line in f:
usematch = usere.match(line)
if usematch is not None:
usename = usematch.group(1)
if usename not in tdeps:
# The module is not provided by any source file. This
# is due to:
# a) missing file/typo/etc
# b) using a module provided by the compiler, such as
# OpenMP
# There's no easy way to tell which is which (that I
# know of) so just ignore this and go on. Ideally we
# would print a warning message to the user but this is
# a common occurrence, which would lead to lots of
# distracting noise.
continue
mod_source_file = tdeps[usename]
# Check if a source uses a module it exports itself.
# Potential bug if multiple targets have a file with
# the same name.
if mod_source_file.fname == os.path.split(src)[1]:
continue
mod_name = compiler.module_name_to_filename(
usematch.group(1))
mod_files.append(os.path.join(dirname, mod_name))
return mod_files
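# Illustrative sketch: the two regexes above pick Fortran module definitions and
# usages out of source lines. Standalone example, stdlib only.
import re
_EXAMPLE_MODRE = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
_EXAMPLE_USERE = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
def _example_scan_fortran(lines):
    defined, used = [], []
    for line in lines:
        m = _EXAMPLE_MODRE.match(line)
        if m and m.group(1).lower() != 'procedure':
            defined.append(m.group(1))
        u = _EXAMPLE_USERE.match(line)
        if u:
            used.append(u.group(1))
    return defined, used
# _example_scan_fortran(['module physics', 'use constants', 'module procedure foo'])
# -> (['physics'], ['constants'])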
def get_cross_stdlib_args(self, target, compiler):
if not target.is_cross:
return []
if not self.environment.cross_info.has_stdlib(compiler.language):
return []
return compiler.get_no_stdinc_args()
def get_compile_debugfile_args(self, compiler, target, objfile):
if compiler.id != 'msvc':
return []
# The way MSVC uses PDB files is documented exactly nowhere so
# the following is what we have been able to decipher via
# reverse engineering.
#
# Each object file gets the path of its PDB file written
# inside it. This can be either the final PDB (for, say,
# foo.exe) or an object pdb (for foo.obj). If the former, then
# each compilation step locks the pdb file for writing, which
# is a bottleneck and object files from one target can not be
# used in a different target. The latter seems to be the
# sensible one (and what Unix does) but there is a catch. If
# you try to use precompiled headers MSVC will error out
# because both source and pch pdbs go in the same file and
# they must be the same.
#
# This means:
#
# - pch files must be compiled anew for every object file (negating
# the entire point of having them in the first place)
# - when using pch, output must go to the target pdb
#
# Since both of these are broken in some way, use the one that
# works for each target. This unfortunately means that you
# can't combine pch and object extraction in a single target.
#
# PDB files also lead to filename collisions. A target foo.exe
# has a corresponding foo.pdb. A shared library foo.dll _also_
# has pdb file called foo.pdb. So will a static library
# foo.lib, which clobbers both foo.pdb _and_ the dll file's
# export library called foo.lib (by default, currently we name
# them libfoo.a to avoid this issue). You can give the files
# unique names such as foo_exe.pdb but VC also generates a
# bunch of other files which take their names from the target
# basename (i.e. "foo") and stomp on each other.
#
# CMake solves this problem by doing two things. First of all
# static libraries do not generate pdb files at
# all. Presumably you don't need them and VC is smart enough
# to look up the original data when linking (speculation, not
# tested). The second solution is that you can only have
# target named "foo" as an exe, shared lib _or_ static
# lib. This makes filename collisions not happen. The downside
# is that you can't have an executable foo that uses a shared
# library libfoo.so, which is a common idiom on Unix.
#
# If you feel that the above is completely wrong and all of
# this is actually doable, please send patches.
if target.has_pch():
tfilename = self.get_target_filename_abs(target)
return compiler.get_compile_debugfile_args(tfilename, pch=True)
else:
return compiler.get_compile_debugfile_args(objfile, pch=False)
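# Illustrative sketch (assumption) of what the MSVC branch described in the long
# comment above boils down to: with a pch, /Fd points at the target-wide pdb,
# otherwise at a pdb next to the object file. The exact flags come from the
# compiler class, not from this sketch.
def _example_msvc_debug_args(uses_pch, target_filename, objfile):
    pdb_base = target_filename if uses_pch else objfile
    return ['/Fd' + pdb_base + '.pdb', '/FS']
# _example_msvc_debug_args(False, 'prog.exe', 'prog.obj') -> ['/Fdprog.obj.pdb', '/FS']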
def get_link_debugfile_args(self, linker, target, outname):
return linker.get_link_debugfile_args(outname)
def generate_llvm_ir_compile(self, target, outfile, src):
compiler = get_compiler_for_source(target.compilers.values(), src)
commands = CompilerArgs(compiler)
# Compiler args for compiling this target
commands += compilers.get_base_compile_args(self.environment.coredata.base_options,
compiler)
if isinstance(src, File):
if src.is_built:
src_filename = os.path.join(src.subdir, src.fname)
else:
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
if isinstance(src, File) and src.is_built:
rel_src = src.fname
elif isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments('Invalid source type: {!r}'.format(src))
# Write the Ninja build command
compiler_name = 'llvm_ir{}_COMPILER'.format('_CROSS' if target.is_cross else '')
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
element.add_item('ARGS', commands)
element.write(outfile)
return rel_obj
def get_source_dir_include_args(self, target, compiler):
curdir = target.get_subdir()
tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
return compiler.get_include_args(tmppath, False)
def get_build_dir_include_args(self, target, compiler):
curdir = target.get_subdir()
if curdir == '':
curdir = '.'
return compiler.get_include_args(curdir, False)
def get_custom_target_dir_include_args(self, target, compiler):
custom_target_include_dirs = []
for i in target.get_generated_sources():
# Generator output goes into the target private dir which is
# already in the include paths list. Only custom targets have their
# own target build dir.
if not isinstance(i, build.CustomTarget):
continue
idir = self.get_target_dir(i)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
incs = []
for i in custom_target_include_dirs:
incs += compiler.get_include_args(i, False)
return incs
def _generate_single_compile(self, target, compiler, is_generated=False):
base_proxy = backends.OptionOverrideProxy(target.option_overrides,
self.environment.coredata.base_options)
# Create an empty commands list, and start adding arguments from
# various sources in the order in which they must override each other
commands = CompilerArgs(compiler)
# Add compiler args for compiling this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
commands += compilers.get_base_compile_args(base_proxy,
compiler)
# The code generated by valac is usually crap and has tons of unused
# variables and such, so disable warnings for Vala C sources.
no_warn_args = (is_generated == 'vala')
# Add compiler args and include paths from several sources; defaults,
# build options, external dependencies, etc.
commands += self.generate_basic_compiler_args(target, compiler, no_warn_args)
# Add include dirs from the `include_directories:` kwarg on the target
# and from `include_directories:` of internal deps of the target.
#
# Target include dirs should override internal deps include dirs.
# This is handled in BuildTarget.process_kwargs()
#
# Include dirs from internal deps should override include dirs from
# external deps and must maintain the order in which they are specified.
# Hence, we must reverse the list so that the order is preserved.
for i in reversed(target.get_include_dirs()):
basedir = i.get_curdir()
for d in i.get_incdirs():
# Avoid superfluous '/.' at the end of paths when d is '.'
if d not in ('', '.'):
expdir = os.path.join(basedir, d)
else:
expdir = basedir
srctreedir = os.path.join(self.build_to_src, expdir)
# Add source subdir first so that the build subdir overrides it
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += sargs
# There may be include dirs where a build directory has not been
# created for some source dir. For example if someone does this:
#
# inc = include_directories('foo/bar/baz')
#
# But never subdir()s into the actual dir.
if os.path.isdir(os.path.join(self.environment.get_build_dir(), expdir)):
bargs = compiler.get_include_args(expdir, i.is_system)
else:
bargs = []
commands += bargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
# Add per-target compile args, f.ex, `c_args : ['-DFOO']`. We set these
# near the end since these are supposed to override everything else.
commands += self.escape_extra_args(compiler,
target.get_extra_args(compiler.get_language()))
# Add source dir and build dir. Project-specific and target-specific
# include paths must override per-target compile args, include paths
# from external dependencies, internal dependencies, and from
# per-target `include_directories:`
#
# We prefer headers in the build dir and the custom target dir over the
# source dir since, for instance, the user might have an
# srcdir == builddir Autotools build in their source tree. Many
# projects that are moving to Meson have both Meson and Autotools in
# parallel as part of the transition.
commands += self.get_source_dir_include_args(target, compiler)
commands += self.get_custom_target_dir_include_args(target, compiler)
commands += self.get_build_dir_include_args(target, compiler)
# Finally add the private dir for the target to the include path. This
# must override everything else and must be the final path added.
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
return commands
def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
"""
Compiles C/C++, ObjC/ObjC++, Fortran, and D sources
"""
if isinstance(src, str) and src.endswith('.h'):
raise AssertionError('BUG: sources should not contain headers {!r}'.format(src))
compiler = get_compiler_for_source(target.compilers.values(), src)
key = (target, compiler, is_generated)
if key in self.target_arg_cache:
commands = self.target_arg_cache[key]
else:
commands = self._generate_single_compile(target, compiler, is_generated)
self.target_arg_cache[key] = commands
commands = CompilerArgs(commands.compiler, commands)
if isinstance(src, mesonlib.File) and src.is_built:
rel_src = os.path.join(src.subdir, src.fname)
if os.path.isabs(rel_src):
assert(rel_src.startswith(self.environment.get_build_dir()))
rel_src = rel_src[len(self.environment.get_build_dir())+1:]
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
elif isinstance(src, mesonlib.File):
rel_src = src.rel_to_builddir(self.build_to_src)
abs_src = src.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
elif is_generated:
raise AssertionError('BUG: broken generated source file handling for {!r}'.format(src))
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments('Invalid source type: {!r}'.format(src))
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, File):
if src.is_built:
src_filename = os.path.join(src.subdir, src.fname)
if os.path.isabs(src_filename):
assert(src_filename.startswith(self.environment.get_build_dir()))
src_filename = src_filename[len(self.environment.get_build_dir())+1:]
else:
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
# Add MSVC debug file generation compile flags: /Fd /FS
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
# PCH handling
if self.environment.coredata.base_options.get('b_pch', False):
commands += self.get_pch_include_args(compiler, target)
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if not pchlist:
pch_dep = []
elif compiler.id == 'intel':
pch_dep = []
else:
arr = []
i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
arr.append(i)
pch_dep = arr
crstr = ''
if target.is_cross:
crstr = '_CROSS'
compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == 'fortran':
# Can't read source file to scan for deps if it's generated later
# at build-time. Skip scanning for deps, and just set the module
# outdir argument instead.
# https://github.com/mesonbuild/meson/issues/1348
if not is_generated:
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(self.get_target_private_dir(target),
compiler.module_name_to_filename(modname))
if srcfile == src:
depelem = NinjaBuildElement(self.all_outputs, modfile, 'FORTRAN_DEP_HACK', rel_obj)
depelem.write(outfile)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, File):
d = d.rel_to_builddir(self.build_to_src)
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, File):
d = d.rel_to_builddir(self.build_to_src)
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item('DEPFILE', dep_file)
element.add_item('ARGS', commands)
element.write(outfile)
return rel_obj
def has_dir_part(self, fname):
# FIXME FIXME: The usage of this is a terrible and unreliable hack
if isinstance(fname, File):
return fname.subdir != ''
return '/' in fname or '\\' in fname
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
# scan all inputs and write out explicit deps for each file. That is too slow and too much effort so
# instead just have an ordered dependency on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
def generate_msvc_pch_command(self, target, compiler, pch):
if len(pch) != 2:
raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
header = pch[0]
source = pch[1]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
just_name = os.path.split(header)[1]
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
commands += self.get_compile_debugfile_args(compiler, target, objname)
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [objname]
def generate_gcc_pch_command(self, target, compiler, pch):
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
dst = os.path.join(self.get_target_private_dir(target),
os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [] # Gcc does not create an object file during pch generation.
def generate_pch(self, target, outfile):
cstr = ''
pch_objects = []
if target.is_cross:
cstr = '_CROSS'
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if not pch:
continue
if '/' not in pch[0] or '/' not in pch[-1]:
msg = 'Precompiled header of {!r} must not be in the same ' \
'directory as source, please put it in a subdirectory.' \
''.format(target.get_basename())
raise InvalidArguments(msg)
compiler = target.compilers[lang]
if compiler.id == 'msvc':
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
(commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
elif compiler.id == 'intel':
# Intel generates on target generation
continue
else:
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
(commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
extradep = None
pch_objects += objs
rulename = compiler.get_language() + cstr + '_PCH'
elem = NinjaBuildElement(self.all_outputs, dst, rulename, src)
if extradep is not None:
elem.add_dep(extradep)
elem.add_item('ARGS', commands)
elem.add_item('DEPFILE', dep)
elem.write(outfile)
return pch_objects
def generate_shsym(self, outfile, target):
target_name = self.get_target_filename(target)
targetdir = self.get_target_private_dir(target)
symname = os.path.join(targetdir, target_name + '.symbols')
elem = NinjaBuildElement(self.all_outputs, symname, 'SHSYM', target_name)
if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
elem.write(outfile)
def get_cross_stdlib_link_args(self, target, linker):
if isinstance(target, build.StaticLibrary) or not target.is_cross:
return []
if not self.environment.cross_info.has_stdlib(linker.language):
return []
return linker.get_no_stdlib_link_args()
def get_target_type_link_args(self, target, linker):
abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
commands = []
if isinstance(target, build.Executable):
# Currently only used with the Swift compiler to add '-emit-executable'
commands += linker.get_std_exe_link_args()
# If gui_app, and that's significant on this platform
if target.gui_app and hasattr(linker, 'get_gui_app_args'):
commands += linker.get_gui_app_args()
elif isinstance(target, build.SharedLibrary):
if isinstance(target, build.SharedModule):
commands += linker.get_std_shared_module_link_args()
else:
commands += linker.get_std_shared_lib_link_args()
# All shared libraries are PIC
commands += linker.get_pic_args()
# Add -Wl,-soname arguments on Linux, -install_name on OS X
commands += linker.get_soname_args(target.prefix, target.name, target.suffix,
abspath, target.soversion,
isinstance(target, build.SharedModule))
# This is only visited when building for Windows using either GCC or Visual Studio
if target.vs_module_defs and hasattr(linker, 'gen_vs_module_defs_args'):
commands += linker.gen_vs_module_defs_args(target.vs_module_defs.rel_to_builddir(self.build_to_src))
# This is only visited when building for Windows using either GCC or Visual Studio
if target.import_filename:
commands += linker.gen_import_library_args(os.path.join(target.subdir, target.import_filename))
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args()
else:
raise RuntimeError('Unknown build target type.')
return commands
def get_link_whole_args(self, linker, target):
target_args = self.build_target_link_arguments(linker, target.link_whole_targets)
return linker.get_link_whole_for(target_args) if len(target_args) else []
def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(outfile, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
linker_rule = linker_base + crstr + '_LINKER'
# Create an empty commands list, and start adding link arguments from
# various sources in the order in which they must override each other
# starting from hard-coded defaults followed by build options and so on.
#
# Once all the linker options have been passed, we will start passing
# libraries and library paths from internal and external sources.
commands = CompilerArgs(linker)
# First, the trivial ones that are impossible to override.
#
# Add linker args for linking this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
if not isinstance(target, build.StaticLibrary):
commands += compilers.get_base_link_args(self.environment.coredata.base_options,
linker,
isinstance(target, build.SharedModule))
# Add -nostdlib if needed; can't be overridden
commands += self.get_cross_stdlib_link_args(target, linker)
# Add things like /NOLOGO; usually can't be overridden
commands += linker.get_linker_always_args()
# Add buildtype linker args: optimization level, etc.
commands += linker.get_buildtype_linker_args(self.get_option_for_target('buildtype', target))
# Add /DEBUG and the pdb filename when using MSVC
commands += self.get_link_debugfile_args(linker, target, outname)
# Add link args specific to this BuildTarget type, such as soname args,
# PIC, import library generation, etc.
commands += self.get_target_type_link_args(target, linker)
# Archives that are copied wholesale in the result. Must be before any
# other link targets so missing symbols from whole archives are found in those.
if not isinstance(target, build.StaticLibrary):
commands += self.get_link_whole_args(linker, target)
if not isinstance(target, build.StaticLibrary):
# Add link args added using add_project_link_arguments()
commands += self.build.get_project_link_args(linker, target.subproject)
# Add link args added using add_global_link_arguments()
# These override per-project link arguments
commands += self.build.get_global_link_args(linker)
if not target.is_cross:
# Link args added from the env: LDFLAGS. We want these to
# override all the defaults but not the per-target link args.
commands += self.environment.coredata.external_link_args[linker.get_language()]
# Now we will add libraries and library paths from various sources
# Add link args to link to all internal libraries (link_with:) and
# internal dependencies needed by this target.
if linker_base == 'STATIC':
# Link arguments of static libraries are not put in the command
# line of the library. They are instead appended to the command
# line where the static library is used.
dependencies = []
else:
dependencies = target.get_dependencies()
commands += self.build_target_link_arguments(linker, dependencies)
# For 'automagic' deps: Boost and GTest. Also dependency('threads').
# pkg-config puts the thread flags itself via `Cflags:`
for d in target.external_deps:
if d.need_threads():
commands += linker.thread_link_flags()
# Only non-static built targets need link args and link dependencies
if not isinstance(target, build.StaticLibrary):
commands += target.link_args
# External deps must be last because target link libraries may depend on them.
for dep in target.get_external_deps():
commands += dep.get_link_args()
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands += dep.get_link_args()
# Add link args for c_* or cpp_* build options. Currently this only
# adds c_winlibs and cpp_winlibs when building for Windows. This needs
# to be after all internal and external libraries so that unresolved
# symbols from those can be found here. This is needed when the
# *_winlibs that we want to link to are static mingw64 libraries.
commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
# Set runtime-paths so we can run executables without needing to set
# LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.
commands += linker.build_rpath_args(self.environment.get_build_dir(),
self.determine_rpath_dirs(target),
target.install_rpath)
# Add libraries generated by custom targets
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
dep_targets = [self.get_dependency_filename(t) for t in dependencies]
dep_targets.extend([self.get_dependency_filename(t)
for t in target.link_depends])
elem = NinjaBuildElement(self.all_outputs, outname, linker_rule, obj_list)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
return elem
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
prospective = self.get_target_dir(ld)
if prospective not in result:
result.append(prospective)
return result
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
elif isinstance(t, mesonlib.File):
if t.is_built:
return t.relative_name()
else:
return t.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
aliases = target.get_aliases()
for alias, to in aliases.items():
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
try:
os.symlink(to, aliasfile)
except NotImplementedError:
mlog.debug("Library versioning disabled because symlinks are not supported.")
except OSError:
mlog.debug("Library versioning disabled because we do not have symlink creation privileges.")
def generate_custom_target_clean(self, outfile, trees):
e = NinjaBuildElement(self.all_outputs, 'clean-ctlist', 'CUSTOM_COMMAND', 'PHONY')
d = CleanTrees(self.environment.get_build_dir(), trees)
d_file = os.path.join(self.environment.get_scratch_dir(), 'cleantrees.dat')
e.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'cleantrees', d_file])
e.add_item('description', 'Cleaning custom target directories.')
e.write(outfile)
# Write out the data file passed to the script
with open(d_file, 'wb') as ofile:
pickle.dump(d, ofile)
return 'clean-ctlist'
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement(self.all_outputs, 'clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files.')
gcno_elem.write(outfile)
gcda_elem = NinjaBuildElement(self.all_outputs, 'clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files.')
gcda_elem.write(outfile)
def get_user_option_args(self):
cmds = []
for (k, v) in self.environment.coredata.user_options.items():
cmds.append('-D' + k + '=' + (v.value if isinstance(v.value, str) else str(v.value).lower()))
# The order of these arguments must be the same between runs of Meson
# to ensure reproducible output. The order we pass them shouldn't
# affect behavior in any other way.
return sorted(cmds)
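# Illustrative sketch of the rendering and sorting above: booleans become
# lowercase strings and the -D arguments are sorted so regeneration is stable.
def _example_user_option_args(options):
    cmds = []
    for k, v in options.items():
        cmds.append('-D' + k + '=' + (v if isinstance(v, str) else str(v).lower()))
    return sorted(cmds)
# _example_user_option_args({'use_foo': True, 'bar_level': 2})
# -> ['-Dbar_level=2', '-Duse_foo=true']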
def generate_dist(self, outfile):
elem = NinjaBuildElement(self.all_outputs, 'dist', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('DESC', 'Creating source packages')
elem.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'dist',
self.environment.source_dir,
self.environment.build_dir,
sys.executable,
self.environment.get_build_command()])
elem.add_item('pool', 'console')
elem.write(outfile)
# For things like scan-build and other helper tools we might have.
def generate_utils(self, outfile):
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'scanbuild', self.environment.source_dir, self.environment.build_dir,
sys.executable, self.environment.get_build_command()] + self.get_user_option_args()
elem = NinjaBuildElement(self.all_outputs, 'scan-build', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
elem.write(outfile)
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'uninstall']
elem = NinjaBuildElement(self.all_outputs, 'uninstall', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
elem.write(outfile)
def generate_ending(self, outfile):
targetlist = []
for t in self.get_build_by_default_targets().values():
# Add the first output of each target to the 'all' target so that
# they are all built
targetlist.append(os.path.join(self.get_target_dir(t), t.get_outputs()[0]))
elem = NinjaBuildElement(self.all_outputs, 'all', 'phony', targetlist)
elem.write(outfile)
default = 'default all\n\n'
outfile.write(default)
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise MesonException('Could not detect Ninja v1.6 or newer')
elem = NinjaBuildElement(self.all_outputs, 'clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
elem.add_item('description', 'Cleaning.')
# If we have custom targets in this project, add all their outputs to
# the list that is passed to the `cleantrees.py` script. The script
# will manually delete all custom_target outputs that are directories
# instead of files. This is needed because on platforms other than
# Windows, Ninja only deletes directories while cleaning if they are
# empty. https://github.com/mesonbuild/meson/issues/1220
ctlist = []
for t in self.build.get_targets().values():
if isinstance(t, build.CustomTarget):
# Create a list of all custom target outputs
for o in t.get_outputs():
ctlist.append(os.path.join(self.get_target_dir(t), o))
if ctlist:
elem.add_dep(self.generate_custom_target_clean(outfile, ctlist))
if 'b_coverage' in self.environment.coredata.base_options and \
self.environment.coredata.base_options['b_coverage'].value:
self.generate_gcov_clean(outfile)
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
elem.write(outfile)
deps = self.get_regen_filelist()
elem = NinjaBuildElement(self.all_outputs, 'build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, 'reconfigure', 'REGENERATE_BUILD', 'PHONY')
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, deps, 'phony', '')
elem.write(outfile)
|
apache-2.0
| 4,618,508,688,966,883,000
| 48.26108
| 132
| 0.579532
| false
| 4.051763
| false
| false
| false
|
Tejal011089/med2-app
|
selling/utils/__init__.py
|
1
|
63144
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _, throw
from webnotes.utils import flt, cint
from webnotes.utils import load_json, nowdate, cstr
from webnotes.model.code import get_obj
from webnotes.model.doc import Document
from webnotes import msgprint
from webnotes.model.bean import getlist, copy_doclist
#from webnotes.model.code import get_obj
from webnotes.model.bean import getlist, copy_doclist
from datetime import datetime, timedelta,date
from webnotes.utils.email_lib import sendmail
import json
def get_customer_list(doctype, txt, searchfield, start, page_len, filters):
if webnotes.conn.get_default("cust_master_name") == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
return webnotes.conn.sql("""select %s from `tabCustomer` where docstatus < 2
and (%s like %s or customer_name like %s) order by
case when name like %s then 0 else 1 end,
case when customer_name like %s then 0 else 1 end,
name, customer_name limit %s, %s""" %
(", ".join(fields), searchfield, "%s", "%s", "%s", "%s", "%s", "%s"),
("%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, start, page_len))
def get_accessories(doctype, txt, searchfield, start, page_len, filters):
webnotes.errprint("ijn init ")
@webnotes.whitelist()
def get_item_details(args):
"""
args = {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0
}
"""
if isinstance(args, basestring):
args = json.loads(args)
args = webnotes._dict(args)
if args.barcode:
args.item_code = _get_item_code(barcode=args.barcode)
elif not args.item_code and args.serial_no:
args.item_code = _get_item_code(serial_no=args.serial_no)
item_bean = webnotes.bean("Item", args.item_code)
_validate_item_details(args, item_bean.doc)
meta = webnotes.get_doctype(args.doctype)
#return args.item_code
# hack! for Sales Order Item
warehouse_fieldname = "warehouse"
if meta.get_field("reserved_warehouse", parentfield=args.parentfield):
warehouse_fieldname = "reserved_warehouse"
out = _get_basic_details(args, item_bean, warehouse_fieldname)
if meta.get_field("currency"):
out.base_ref_rate = out.basic_rate = out.ref_rate = out.export_rate = 0.0
if args.selling_price_list and args.price_list_currency:
out.update(_get_price_list_rate(args, item_bean, meta))
out.update(_get_item_discount(out.item_group, args.customer))
if out.get(warehouse_fieldname):
out.update(get_available_qty(args.item_code, out.get(warehouse_fieldname)))
out.customer_item_code = _get_customer_item_code(args, item_bean)
if cint(args.is_pos):
pos_settings = get_pos_settings(args.company)
if pos_settings:
out.update(apply_pos_settings(pos_settings, out))
if args.doctype in ("Sales Invoice", "Delivery Note"):
if item_bean.doc.has_serial_no == "Yes" and not args.serial_no:
out.serial_no = _get_serial_nos_by_fifo(args, item_bean)
# accessories= webnotes.conn.sql(""" select item_code from `tabAccessories`
# where parent='%s'"""%args.item_code,as_list=1)
# if accessories:
# return out, accessories
# else:
return out
@webnotes.whitelist()
def get_accssories_details(args):
if isinstance(args, basestring):
args = json.loads(args)
args = webnotes._dict(args)
accessories= webnotes.conn.sql(""" select item_code from `tabAccessories`
where parent='%s'"""%args.item_code,as_list=1)
if accessories:
return accessories
else:
return ''
def _get_serial_nos_by_fifo(args, item_bean):
return "\n".join(webnotes.conn.sql_list("""select name from `tabSerial No`
where item_code=%(item_code)s and warehouse=%(warehouse)s and status='Available'
order by timestamp(purchase_date, purchase_time) asc limit %(qty)s""", {
"item_code": args.item_code,
"warehouse": args.warehouse,
"qty": cint(args.qty)
}))
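# Illustrative sketch of the FIFO pick done by the SQL above: take the oldest
# available serial numbers first, up to the requested quantity. Pure Python with
# stand-in data instead of the `tabSerial No` table.
def _example_fifo_serials(serials, qty):
    # serials: list of (name, purchase_datetime, status) tuples
    available = [s for s in serials if s[2] == 'Available']
    available.sort(key=lambda s: s[1])
    return "\n".join(s[0] for s in available[:qty])
# _example_fifo_serials([('SN-2', '2013-05-01', 'Available'),
#                        ('SN-1', '2013-01-01', 'Available')], 1) -> 'SN-1'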
def _get_item_code(barcode=None, serial_no=None):
if barcode:
input_type = "Barcode"
item_code = webnotes.conn.sql_list("""select name from `tabItem` where barcode=%s""", barcode)
elif serial_no:
input_type = "Serial No"
item_code = webnotes.conn.sql_list("""select item_code from `tabSerial No`
where name=%s""", serial_no)
if not item_code:
throw(_("No Item found with ") + input_type + ": %s" % (barcode or serial_no))
return item_code[0]
def _validate_item_details(args, item):
from utilities.transaction_base import validate_item_fetch
validate_item_fetch(args, item)
# validate if sales item or service item
if args.order_type == "Maintenance":
if item.is_service_item != "Yes":
throw(_("Item") + (" %s: " % item.name) +
_("not a service item.") +
_("Please select a service item or change the order type to Sales."))
elif item.is_sales_item != "Yes":
throw(_("Item") + (" %s: " % item.name) + _("not a sales item"))
def _get_basic_details(args, item_bean, warehouse_fieldname):
item = item_bean.doc
from webnotes.defaults import get_user_default_as_list
user_default_warehouse_list = get_user_default_as_list('warehouse')
user_default_warehouse = user_default_warehouse_list[0] \
if len(user_default_warehouse_list)==1 else ""
out = webnotes._dict({
"item_code": item.name,
"description": item.description_html or item.description,
warehouse_fieldname: user_default_warehouse or item.default_warehouse \
or args.get(warehouse_fieldname),
"income_account": item.default_income_account or args.income_account \
or webnotes.conn.get_value("Company", args.company, "default_income_account"),
"expense_account": item.purchase_account or args.expense_account \
or webnotes.conn.get_value("Company", args.company, "default_expense_account"),
"cost_center": item.default_sales_cost_center or args.cost_center,
"qty": 1.0,
"export_amount": 0.0,
"amount": 0.0,
"batch_no": None,
"item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in
item_bean.doclist.get({"parentfield": "item_tax"})))),
})
for fieldname in ("item_name", "item_group", "barcode", "brand", "stock_uom"):
out[fieldname] = item.fields.get(fieldname)
return out
def _get_price_list_rate(args, item_bean, meta):
ref_rate = webnotes.conn.sql("""select ref_rate from `tabItem Price`
where price_list=%s and item_code=%s and selling=1""",
(args.selling_price_list, args.item_code), as_dict=1)
if not ref_rate:
return {}
# found price list rate - now we can validate
from utilities.transaction_base import validate_currency
validate_currency(args, item_bean.doc, meta)
return {"ref_rate": flt(ref_rate[0].ref_rate) * flt(args.plc_conversion_rate) / flt(args.conversion_rate)}
def _get_item_discount(item_group, customer):
parent_item_groups = [x[0] for x in webnotes.conn.sql("""SELECT parent.name
FROM `tabItem Group` AS node, `tabItem Group` AS parent
WHERE parent.lft <= node.lft and parent.rgt >= node.rgt and node.name = %s
GROUP BY parent.name
ORDER BY parent.lft desc""", (item_group,))]
discount = 0
for d in parent_item_groups:
res = webnotes.conn.sql("""select discount, name from `tabCustomer Discount`
where parent = %s and item_group = %s""", (customer, d))
if res:
discount = flt(res[0][0])
break
return {"adj_rate": discount}
def send_sms(msg,sender_no):
ss = get_obj('SMS Settings', 'SMS Settings', with_children=1)
webnotes.errprint("In send SMS ")
webnotes.errprint(ss)
#return ss
args = {}
#msg="Ticket Created"
for d in getlist(ss.doclist, 'static_parameter_details'):
args[d.parameter] = d.value
sms_url=webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url')
msg_parameter=webnotes.conn.get_value('SMS Settings', None, 'message_parameter')
receiver_parameter=webnotes.conn.get_value('SMS Settings', None, 'receiver_parameter')
url = sms_url +"?username="+ args["username"] +"&password="+args["password"]+"&sendername="+ args["sendername"] +"&mobileno="+ sender_no +"&message=" + msg
webnotes.errprint(url)
import requests
r = requests.get(url)
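# Illustrative alternative sketch (not what send_sms above does verbatim): the
# same gateway call expressed with urlencode, so spaces and special characters
# in the message are escaped instead of being concatenated raw into the URL.
try:
    from urllib import urlencode          # Python 2, matching this codebase
except ImportError:
    from urllib.parse import urlencode    # Python 3 fallback
import requests
def _example_send_sms(gateway_url, username, password, sendername, mobileno, message):
    params = {'username': username, 'password': password,
              'sendername': sendername, 'mobileno': mobileno, 'message': message}
    return requests.get(gateway_url + '?' + urlencode(params))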
def send_email(email,msg):
webnotes.errprint("in email")
#webnotes.msgprint(email)
from webnotes.utils.email_lib import sendmail
sendmail(email, subject="Payment Due Details", msg = msg)
@webnotes.whitelist()
def get_available_qty(item_code, warehouse):
return webnotes.conn.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
["projected_qty", "actual_qty"], as_dict=True) or {}
def _get_customer_item_code(args, item_bean):
customer_item_code = item_bean.doclist.get({"parentfield": "item_customer_details",
"customer_name": args.customer})
return customer_item_code and customer_item_code[0].ref_code or None
def get_pos_settings(company):
pos_settings = webnotes.conn.sql("""select * from `tabPOS Setting` where user = %s
and company = %s""", (webnotes.session['user'], company), as_dict=1)
if not pos_settings:
pos_settings = webnotes.conn.sql("""select * from `tabPOS Setting`
where ifnull(user,'') = '' and company = %s""", company, as_dict=1)
return pos_settings and pos_settings[0] or None
def apply_pos_settings(pos_settings, opts):
out = {}
for fieldname in ("income_account", "cost_center", "warehouse", "expense_account"):
if not opts.get(fieldname):
out[fieldname] = pos_settings.get(fieldname)
if out.get("warehouse"):
out["actual_qty"] = get_available_qty(opts.item_code, out.get("warehouse")).get("actual_qty")
return out
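# Illustrative sketch of the fallback rule above: a field is taken from the POS
# Setting only when the transaction has not already supplied a value for it.
def _example_apply_defaults(opts, defaults, fields):
    out = {}
    for fieldname in fields:
        if not opts.get(fieldname):
            out[fieldname] = defaults.get(fieldname)
    return out
# _example_apply_defaults({'warehouse': 'Stores'},
#                         {'warehouse': 'POS WH', 'cost_center': 'Main'},
#                         ['warehouse', 'cost_center'])
# -> {'cost_center': 'Main'}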
@webnotes.whitelist(allow_guest=True)
def get_installation_note(customer,emp_id,_type='POST'):
#return "hello "+customer
qr="select customer_name from `tabCustomer` where customer_name="+customer+" "
res=webnotes.conn.sql(qr)
#return res
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
qry="select name from `tabFiscal Year` where is_fiscal_year_closed='No'"
res1=webnotes.conn.sql(qry)
#return res1[0][0]
from webnotes.model.doc import Document
import time
if res :
d= Document('Installation Note')
d.customer=customer[1:-1]
d.customer_name=customer[1:-1]
d.inst_time=time.strftime("%H:%M:%S")
d.inst_date=today
d.employee_id=emp_id[1:-1]
#return d.employee_id
d.fiscal_year=res1[0][0]
d.company='medsynaptic'
d.territory='India'
d.customer_group='Individual'
#return d.fiscal_year
d.save()
webnotes.conn.commit()
return d.name
else:
d= Document('Customer')
d.customer_name=customer[1:-1]
d.customer_type='Individual'
d.customer_group='Individual'
d.territory='India'
d.save()
webnotes.conn.commit()
c= Document('Installation Note')
c.customer=customer[1:-1]
c.inst_time=time.strftime("%H:%M:%S")
c.inst_date=today
c.fiscal_year=res1[0][0]
c.employee_id=emp_id[1:-1]
c.company='Medsynaptic'
c.territory='India'
c.customer_group='Individual'
c.save()
webnotes.conn.commit()
return c.name
@webnotes.whitelist(allow_guest=True)
def get_customer_issue(installationname,sender_no,message,_type='POST'):
#return installationname[1:-1]
#sender_no1=sender_no[-11:]
qr="select customer,employee_id from `tabInstallation Note` where name='"+installationname[1:-1]+"' "
res=webnotes.conn.sql(qr)
#return qr
x="select customer_name from `tabCustomer` where customer_no='"+sender_no[1:-1]+"' "
y=webnotes.conn.sql(x)
#return x
m= None
if not y :
z="select user_id from `tabEmployee` where cell_number="+sender_no[1:-1]+""
m=webnotes.conn.sql(z)
#return m
w="select status,user_id from `tabEmployee` where name='%s'"%(res[0][1]);
t=webnotes.conn.sql(w)
#return t
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
qry="select name from `tabFiscal Year` where is_fiscal_year_closed='No'"
res1=webnotes.conn.sql(qry)
q=" select territory from `tabCustomer` where name='%s'"%(res[0][0]);
r=webnotes.conn.sql(q)
w="select y.parent from `tabDefaultValue` y,`tabProfile` p, `tabUserRole` r where defkey = '%s' and defvalue = '%s' and r.role='Manager'"%('territory',r[0][0])
a=webnotes.conn.sql(w)
#return a
from webnotes.model.doc import Document
import time
#if res :
d = Document('Support Ticket')
d.opening_time=time.strftime("%H:%M:%S")
if y:
d.raised_by=y[0][0]
elif m:
	d.raised_by=m[0][0] # m holds the employee lookup result; z is only the SQL string
else:
d.raised_by=sender_no[-11:]
d.subject=installationname[1:-1]
d.customer_name=res[0][0]
d.customer=res[0][0]
d.territory=r[0][0]
d.status='Open'
#d.customer_group='Individual'
d.opening_date=today
#d.fiscal_year=res1[0][0]
d.company='medsynaptic'
d.territory=r[0][0]
#d.raised_by=res[0][1]
if t[0][0] =='Active':
#return t[0][1]
d.assigned_to=t[0][1]
d.assigned_to_higher_level=a[0][0]
else:
d.assigned_to=a[0][0]
d.assigned_to_higher_level=a[0][0]
#d.assigned_to_higher_level=a[0][0]
#return d.fiscal_year
d.save()
webnotes.conn.commit()
#return sender_no[1:-1]
p=send_sms(message[1:-1],sender_no[1:-1])
return d.name
#else:
#d= Document('Customer')
#d.customer_name=customer[1:-1]
#d.customer_group='Individual'
#d.customer_name=customer[1:-1]
#d.territory='India'
#d.save()
#webnotes.conn.commit()
#c= Document('Installation Note')
#c.inst_time=time.strftime("%H:%M:%S")
#c.inst_date=today
#c.customer=customer[1:-1]
#c.customer_name=customer[1:-1]
#c.complaint=complaint[1:-1]
#c.status='Open'
#c.complaint_date=today
#c.fiscal_year=res1[0][0]
#c.company='medsynaptic'
#c.territory='India'
#c.complaint_raised_by=customer[1:-1]
#c.save()
#webnotes.conn.commit()
#return c.name
@webnotes.whitelist(allow_guest=True)
def get_support_ticket(code,sender_no,message,_type='POST'):
#return "hello"
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
from webnotes.model.doc import Document
import time
#return sender_no[1:-1]
if code[1:-1] =="CRT":
#return "hello"
#return sender_no[1:-1]
msg="Dear Customer,According to your request ticket is created"
d= Document('Support Ticket')
d.opening_time=time.strftime("%H:%M:%S")
d.opening_date=today
d.subject=message[1:-1]
d.raised_by=sender_no[1:-1]
d.company='medsynaptic'
d.status='Open'
d.save()
webnotes.conn.commit()
p=send_sms(msg,sender_no[1:-1])
return d.name
elif code[1:-1]=="CLS":
#return "hii"
#msg="Ticket Closed"
#sender_no1=sender_no[-11:]
z="select name from `tabSupport Ticket` where raised_by="+sender_no[1:-1]+" and status='Open'"
x=webnotes.conn.sql(z)
#return x
msg="Dear Customer,according to your request respective ticket is closed"
if x:
g="update `tabSupport Ticket` set status='Closed' where name='%s'"%(x[0][0])
h=webnotes.conn.sql(g)
webnotes.conn.sql("commit")
e=send_sms(msg,sender_no[1:-1])
#webnotes.er
return "Updated"
else:
pass
else:
pass
@webnotes.whitelist(allow_guest=True)
def get_activity_data(code,emp_id,client_name,place,deal_amount,product_sold=None,barcode=None,IR_NO=None,phone_no=None,payment_type=None,payment_mode=None,cheque_no=None,bank=None,cheque_status=None,service_call_type=None):
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
from webnotes.model.doc import Document
import time
#return code
if (code[1:-1] =="SLD" or code =="SLO") and product_sold :
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
d.product_name=product_sold[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1] =="INND" or code[1:-1] =="INNO" or code[1:1] =="INU") and barcode and IR_NO :
#return barcode
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
d.ir_no=IR_NO[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1]=="AMCD" or code[1:-1]=="AMCO") and barcode:
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
#d.ir_no=IR_NO[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1]=="SED" or code[1:-1]=="SEO") and service_call_type and barcode:
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
d.service_call_type=service_call_type[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif code[1:-1]=="PR" and payment_type and payment_mode and cheque_no and bank and cheque_status and barcode:
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
#d.service_call_type=service_call_type[1:-1]
d.payment_type=payment_type[1:-1]
d.payment_mode=payment_mode[1:-1]
d.cheque_no=cheque_no[1:-1]
d.cheque_bank=bank[1:-1]
d.cheque_status=cheque_status[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1]=="DC") and phone_no and product_sold:
#return phone_no[-11:]
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
#d.service_call_type=service_call_type[1:-1]
d.product_name=product_sold[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
c=phone_no[-11:]
d.phone_no=c[1:-1]
d.save()
webnotes.conn.commit()
return d.name
else:
"Last"
@webnotes.whitelist(allow_guest=True)
def get_escalation_for_supportticket(_type='Post'):
#print "get esc"
#val = ''
from webnotes.utils import cstr
aa="select distinct(subdate(CURDATE(), 1)) from `tabHoliday` where subdate(CURDATE(), 1) not in (select holiday_date from `tabHoliday` where parent='2014-2015/Maharashtra/001')"
res=webnotes.conn.sql(aa)
s=Document('Support Ticket')
j=0
#print res
if res:
#print "in res "
for i in range (2,15):
#print "i"
bb="select distinct(subdate(CURDATE(), "+cstr(i)+")) from `tabHoliday`"
#print bb
res1=webnotes.conn.sql(bb)
if res1:
cc="select distinct(subdate(CURDATE(), 1)) from `tabHoliday` where '"+cstr(res1[0][0])+"' in (select holiday_date from `tabHoliday` where parent='2014-2015/Maharashtra/001')"
#print cc
res2=webnotes.conn.sql(cc)
if res2:
#print "old j"
#print j
j=j+24
#print "new j"
#print j
else:
print "breaning "
break
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
qry1="select name from `tabSupport Ticket` t where t.status='Open' and t.creation < DATE_SUB(NOW(), INTERVAL 24+"+cstr(j)+" HOUR) AND t.creation > DATE_SUB(NOW(), INTERVAL 48+"+cstr(j)+" HOUR)"
#print qry1
qry=webnotes.conn.sql(qry1,as_list=1);
webnotes.errprint("in 24 "+cstr(qry))
if qry:
for [k] in qry:
s=Document('Support Ticket')
webnotes.errprint(k)
p=webnotes.conn.sql("select territory from `tabSupport Ticket` where name='"+k+"'")
#webnotes.errprint(p)
w=webnotes.conn.sql("select y.parent from `tabDefaultValue` y,`tabProfile` p, `tabUserRole` r where defkey = '%s' and defvalue = '%s' and r.role='Manager' and y.parent=p.name and r.parent=p.name"%('territory',p[0][0]))
#webnotes.errprint(w[0][0])
ee="update `tabSupport Ticket` set assigned_to='',assigned_to_higher_level='"+cstr(w[0][0])+"' where name='"+cstr(k)+"'"
#print ee
webnotes.conn.sql(ee)
webnotes.conn.commit()
#msg1 = ""
webnotes.errprint("Updated")
flg = webnotes.conn.sql("select flag from `tabSupport Ticket` where name ='"+cstr(k)+"'")
if flg[0][0]=="not":
em=w[0][0]
msg9="Support Ticket '"+k+"' assigned to you...Please check it."
sendmail(em, subject='Support Ticket Alert', msg = msg9)
ss="update `tabSupport Ticket` set flag='fst' where name ='"+cstr(k)+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
qr=webnotes.conn.sql("select name from `tabSupport Ticket` t where t.status='Open' and t.creation < DATE_SUB(NOW(), INTERVAL 48+"+cstr(j)+" HOUR) AND t.creation > DATE_SUB(NOW(), INTERVAL 72+"+cstr(j)+" HOUR)",as_list=1)
webnotes.errprint("in 48 "+cstr(qr))
if qr:
for [l] in qr:
webnotes.errprint(l)
q=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='National Manager' and r.parent=p.name")
#print q
ff="update `tabSupport Ticket` set assigned_to='',assigned_to_higher_level='"+cstr(q[0][0])+"' where name='"+cstr(l)+"'"
#print ff
webnotes.conn.sql(ff)
webnotes.conn.commit()
webnotes.errprint("Updated")
flg = webnotes.conn.sql("select flag from `tabSupport Ticket` where name ='"+cstr(l)+"'")
if flg[0][0]=="fst":
msg10="Support Ticket '"+l+"' assigned to you...Please check it."
em=q[0][0]
sendmail(em, subject='Support Ticket Alert', msg = msg10)
ss="update `tabSupport Ticket` set flag='snd' where name ='"+cstr(l)+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
qs=webnotes.conn.sql("select name from `tabSupport Ticket` t where t.status='Open' and t.creation < DATE_SUB(NOW(), INTERVAL 72+"+cstr(j)+" HOUR) AND t.creation > DATE_SUB(NOW(), INTERVAL 100+"+cstr(j)+" HOUR)",as_list=1);
webnotes.errprint("in 72 "+cstr(qs))
if qs:
for [m] in qs:
s=Document('Support Ticket')
webnotes.errprint(m)
qa=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='COO' and r.parent=p.name")
qd=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='CEO' and r.parent=p.name")
qtt=webnotes.conn.sql("update `tabSupport Ticket` set assigned_to='"+qa[0][0]+"',assigned_to_higher_level= '"+qd[0][0]+"' where name='"+m+"'")
webnotes.conn.commit()
webnotes.errprint("Updated")
flg = webnotes.conn.sql("select flag from `tabSupport Ticket` where name ='"+cstr(m)+"'")
if flg[0][0]=="snd":
msg11="Hello, Support Ticket '"+m+"' assigned to you...Please check it."
em=qa[0][0]+","+qd[0][0]
sendmail(em, subject='Support Ticket Alert', msg = msg11)
ss="update `tabSupport Ticket` set flag='thrd' where name ='"+cstr(m)+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
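# Illustrative sketch (assumption about intent) of the escalation windows used
# above: each holiday immediately preceding today widens the age windows by 24
# hours, then open tickets are reassigned by age bracket.
def _example_escalation_level(ticket_age_hours, holiday_offset_hours):
    j = holiday_offset_hours
    if 24 + j < ticket_age_hours < 48 + j:
        return 'territory Manager'
    if 48 + j < ticket_age_hours < 72 + j:
        return 'National Manager'
    if 72 + j < ticket_age_hours < 100 + j:
        return 'COO and CEO'
    return 'current assignee'
# _example_escalation_level(30, 0)  -> 'territory Manager'
# _example_escalation_level(30, 24) -> 'current assignee'  (the holiday pushed the window out)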
@webnotes.whitelist(allow_guest=True)
def get_payment_followup():
#from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
#from import datetime,date,timedelta
i = datetime.now()
p=i.strftime('%Y-%m-%d')
webnotes.errprint(p)
qry=webnotes.conn.sql("select name from `tabSales Invoice` where outstanding_amount>0",as_list=1)
for [i] in qry:
qr=webnotes.conn.sql("select installation from `tabSales Invoice` where name='"+i+"'",as_list=1)
# webnotes.errprint(qr)
if qr:
q=webnotes.conn.sql("select inst_date,employee_id from `tabInstallation Note` where name='"+qr[0][0]+"'")
#webnotes.errprint([q,"qqqq"])
# webnotes.errprint(q[0][1])
y=webnotes.conn.sql("select grand_total_export from `tabSales Invoice` where name='"+i+"'",as_list=1)
# webnotes.errprint(y)
v=webnotes.conn.sql("select outstanding_amount,customer from `tabSales Invoice` where name='"+i+"'",as_list=1)
# webnotes.errprint(v)
paid=flt(y[0][0]-v[0][0])
if v:
customer_type=webnotes.conn.get_value('Customer',v[0][1],'customer_type')
if customer_type=='OEM':
credit_days=webnotes.conn.get_value('Customer',v[0][1],'credit_days')
elif customer_type:
credit_days=webnotes.conn.get_value('Global Defaults',None,'customer_credit_days')
if not credit_days:
credit_days=0
#webnotes.errprint(["credit_days is here",credit_days])
if q:
webnotes.errprint(q)
s=q[0][0].strftime('%Y-%m-%d')
a=getdate(p)
e=cint((getdate(p) - getdate(s)).days)
if e== cint(credit_days):
webnotes.errprint("in e")
z=webnotes.conn.sql("select cell_number,user_id from `tabEmployee` where name='"+q[0][1]+"'")
webnotes.errprint(z)
ss=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='Manager' and r.parent=p.name")
webnotes.errprint(ss)
if ss:
qq=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='Manager'")
webnotes.errprint(qq)
dic1={
'Sales Invoice No':i,
'Installation Date':s,
'Grand Total':y[0][0],
'Outstanding Amount':v[0][0],
'Paid Amount Till date': paid
}
#webnotes.errprint(flt(y[0][0]))
msg="Dear Sir, Sales Invoice No='"+i+"', Installation Date='"+s+"', Total Amount for specified Sales Invoice is='"+cstr(y[0][0])+"', And Outstanding Amount='"+cstr(v[0][0])+"', And Paid Amount Till Date='"+cstr(paid)+"' "
webnotes.errprint(msg)
# send via the module-level send_sms/send_email helpers (this is a plain function, not a method, so no self)
p=send_sms(z[0][0],msg)
q=send_sms(qq[0][0],msg)
r=send_email(z[0][1],msg)
s=send_email(ss[0][0],msg)
#x=self.send_email(z[0][1],msg)
#webnotes.errprint(qry[0][0])
elif e== 22+cint(credit_days):
ss=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='National Manager' and r.parent=p.name")
webnotes.errprint(ss)
if ss:
qq=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='National Manager'",as_list=1)
#webnotes.errprint(qq)
dic1={
'Sales Invoice No':i,
'Installation Date':s,
'Grand Total':y[0][0],
'Outstanding Amount':v[0][0],
'Paid Amount Till date':paid
}
msg ="Dear Sir, Sales Invoice No='"+i+"', Installation Date='"+s+"', Total Amount for specified Sales Invoice is='"+cstr(y[0][0])+"', And Outstanding Amount='"+cstr(v[0][0])+"', And Paid Amount Till Date='"+cstr(paid)+"' "
p=send_sms(qq[0][0],msg)
q=send_email(ss[0][0],msg)
elif e>= 52+cint(credit_days):
ss=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='CEO' and r.parent=p.name")
webnotes.errprint(ss)
if ss:
qq=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='CEO'",as_list=1)
webnotes.errprint(qq)
ss1=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='COO' and r.parent=p.name")
webnotes.errprint(ss1)
if ss1:
qq1=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='COO'",as_list=1)
webnotes.errprint(qq1)
dic1={
'Sales Invoice No':i,
'Installation Date':s,
'Grand Total':y[0][0],
'Outstanding Amount':v[0][0],
'Paid Amount Till date':paid
}
msg="Dear Sir, Sales Invoice No='"+i+"', Installation Date='"+s+"', Total Amount for specified invoice is='"+cstr(y[0][0])+"', And Outstanding Amount='"+cstr(v[0][0])+"', And Paid Amount Till Date='"+cstr(paid)+"' "
p=send_sms(qq[0][0],msg)
a=send_sms(qq1[0][0],msg)
r=send_email(ss[0][0],msg)
q=send_email(ss1[0][0],msg)
else:
webnotes.errprint("in last")
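# Poll the smslog table for unprocessed inbound SMS (flag=0) and dispatch on the leading
# keyword: #INNO/#INND/#INU, #SLD/#SLO, #AMCD/#AMCO, #SED/#SEO, #PR and #DC create
# Activity Data records, #CRT opens a Support Ticket and #CLS closes one; processed
# messages are marked with flag=1.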
@webnotes.whitelist(allow_guest=True)
def fetch_sms(_type='POST'):
aa="select id,creation,message_body,sender_no from smslog where flag=0 and sender_no is not null and message_body like '#%#'"
bb=webnotes.conn.sql(aa)
from webnotes.model.doc import Document
import datetime,time
from webnotes.utils import now,get_first_day, get_last_day, add_to_date, nowdate, getdate
#print bb
for r in bb:
cc=r[2].split(',')
dd=cc[0].upper().replace(' ','')
#print cc
#print len(cc)
if dd=='#INNO' or dd=='#INND' or dd=='#INU':
if len(cc)==7:
#print "creation "+cstr( r)+"IN"
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[2]
d.client_name=cc[4]
d.place=cc[5]
d.activity_date=now()
d.ir_no=cc[1]
d.barcode=cc[3]
e=now().split(' ')
#d.activity_time=e[1]
d.amount=cc[6].replace('#','').replace(' ','')
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
#print d.name
webnotes.conn.commit()
elif dd=='#CRT' or dd=='#CLS':
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
import time
if dd=='#CRT' and len(cc)==3:
print "crt "+cstr(r) +"CRT CLD"
qr="select customer,employee_id from `tabInstallation Note` where product_barcode='"+cc[1]+"' "
print qr
res=webnotes.conn.sql(qr)
print res
g=t=a=''
if res:
print "in if"
gg="select name,customer_name,territory from tabCustomer where name='"+res[0][0]+"'"
print gg
g=webnotes.conn.sql(gg)
print g
w="select status,user_id from `tabEmployee` where name='%s'"%(res[0][1]);
print w
t=webnotes.conn.sql(w)
print t
print "for employe"
w="select y.parent from `tabDefaultValue` y,`tabProfile` p, `tabUserRole` r where defkey = 'territory' and defvalue = '"+g[0][2]+"' and r.role='Manager' and y.parent=p.name and r.parent=p.name"
print w
a=webnotes.conn.sql(w)
d=Document('Support Ticket')
d.subject=cc[1]
d.status='Open'
#if res:
if g:
d.territory=g and g[0][2] or ''
d.customer_name=g and g[0][1] or ''
d.customer=g and g[0][0] or ''
d.raised_by=r[3]
d.opening_date=nowdate()
#e=now().split(' ')
if t:
if t[0][0] =='Left':
d.assigned_to=a[0][0]
d.assigned_to_higher_level=a[0][0]
#return t[0][1]
else:
d.assigned_to=t[0][1]
d.assigned_to_higher_level=a[0][0]
#e=now().split(' ')
#d.sender_phone_no=r[3]
#d.activity_time='01:01:01'
d.save(new=1)
webnotes.conn.commit()
print d.name
flg=webnotes.conn.sql("select flag from `tabSupport Ticket` where name = '"+d.name+"'")
#print flg
if flg[0][0]=="nott":
msg8="Hello, Support Ticket '"+d.name+"' assigned to you...Please check it."
print msg8
em=t[0][1]+","+a[0][0]
print em
sendmail(em, subject='Support Ticket Alert', msg = msg8)
ss="update `tabSupport Ticket` set flag='not' where name = '"+d.name+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
if d.name:
p=Document('Communication')
p.parent=d.name
p.parentfield='Communications'
p.parenttype='Support Ticket'
p.content=cc[2].replace('#','')
p.subject=cc[1]
p.sender = d.raised_by
p.save(new=1)
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
webnotes.conn.commit()
elif dd=='#CLS' and len(cc)==2:
if len(cc)==2:
d=cc[1]
#print d[:-1]
#print "cls "+cstr(r)
msgg="Dear Customer, according to your request the respective ticket is closed."
ee="update `tabSupport Ticket` set status='Closed' where name='"+cstr(d[:-1])+"'"
print ee
e="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
print e
print r
webnotes.conn.sql(ee)
webnotes.conn.sql(e)
webnotes.conn.commit()
no1=r[3]
no = no1.replace("+", "")
webnotes.errprint(no)
print "END SMS..."
pp=send_sms(msgg,no)
elif dd=='#SLD' or dd=='#SLO':
#print len(cc)
if len(cc)==6 :
print cc
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[2]
d.place=cc[3]
d.sender_phone_no=r[3]
d.activity_date=now()
d.product_name=cc[4]
#d.activity_time=time.strftime("%H:%M:%S")
d.amount=cc[5].replace('#','')
d.save(new=1)
webnotes.conn.commit()
#print d.name
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
webnotes.conn.commit()
elif dd=='#AMCD' or dd=='#AMCO' :
if len(cc)==6:
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[3]
d.place=cc[4]
d.activity_date=now()
#d.ir_no=IR_NO[1:-1]
d.barcode=cc[2]
#d.activity_time=time.strftime("%H:%M:%S")
d.amount=cc[5]
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name :
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
webnotes.conn.commit()
elif dd=="#SED" or dd=="#SEO" :
if len(cc)==6 :
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[3]
d.place=cc[4]
d.activity_date=now()
d.service_call_type=cc[5].replace('#','')
d.barcode=cc[2]
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
print d.name
webnotes.conn.commit()
elif dd=="#PR":
if len(cc)== 11:
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[3]
d.place=cc[4]
d.activity_date=now()
#d.service_call_type=service_call_type[1:-1]
d.payment_type=cc[5]
d.payment_mode=cc[7]
d.cheque_no=cc[8]
d.cheque_bank=cc[9]
d.cheque_status=cc[10].replace('#','')
d.barcode=cc[2]
#d.activity_time=time.strftime("%H:%M:%S")
d.amount=cc[6]
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
print d.name
webnotes.conn.commit()
elif dd=="#DC":
#print "creation for dc need 6 fields "+cstr(cc)
if len(cc)==6:
#return phone_no[-11:]
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[2]
d.place=cc[4]
d.activity_date=now()
d.sender_phone_no=r[3]
#d.service_call_type=service_call_type[1:-1]
d.product_name=cc[5].replace('#','')
#d.activity_time=time.strftime("%H:%M:%S")
#d.amount=deal_amount[1:-1]
d.phone_no=cc[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
print d.name
webnotes.conn.commit()
@webnotes.whitelist(allow_guest=True)
def posting():
from werkzeug.wrappers import Request, Response
return request.form['username']
#return "hi"
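# Create an Installation Note from the posted JSON payload and return the new document name.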
@webnotes.whitelist(allow_guest=True)
def get_post(data,_type='POST'):
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
from webnotes.model.doc import Document
import time
abc=json.loads(data)
aa=Document('Installation Note')
aa.customer=abc['customer_id']
aa.customer_address=abc['address']
aa.address_display=abc['address']
aa.contact_person=abc['contact_person']
aa.employee_id=abc['employee_no']
aa.internal_order_no=abc['iof_no']
aa.contact_email=abc['email']
aa.contact_mobile=abc['phone']
aa.clinic_name=abc['clinic_name']
aa.doctor_name=abc['doctor_name']
aa.city=abc['city']
aa.pincode=abc['pincode']
aa.director_name=abc['director_name']
aa.state=abc['state']
aa.reg_no_clinic=abc['reg_no_clinic']
aa.reg_no_doctor=abc['reg_no_doctor']
aa.website=abc['website']
aa.palce=abc['palce']
#aa.inst_date=abc['date_of_installation'].strftime('%Y-%m-%d')
aa.employee_name=abc['employee_name']
aa.inst_reprot_no=abc['inst_reprot_no']
aa.user_name=abc['user_name']
aa.dept=abc['dept']
aa.contact_mobile=abc['contact_no']
aa.dept1=abc['dept1']
aa.contact_no1=abc['contact_no1']
aa.product_barcode=abc['product_barcode']
aa.version=abc['version']
aa.material_supplied=abc['material_supplied']
aa.inst_start_time=abc['inst_start_time']
aa.inst_date=abc['inst_date']
aa.inst_end_time=abc['inst_end_time']
aa.inst_end_date=abc['inst_end_date']
aa.proc=abc['proc']
aa.ram=abc['ram']
aa.hdd=abc['hdd']
aa.me=abc['me']
aa.other=abc['other']
aa.model_no=abc['model_no']
aa.serial_no=abc['serial_no']
aa.os=abc['os']
aa.inst_type=abc['inst_type']
aa.no_in_case=abc['no_in_case']
aa.training=abc['training']
aa.customer_remark=abc['customer_remark']
aa.engineers_remark=abc['engineers_remark']
aa.status1=abc['status']
aa.signature=abc['signature']
aa.sign_seal=abc['sign_seal']
aa.save(new=1)
webnotes.conn.commit()
return aa.name
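# Return the phone number and email for the given customer; the remaining fields are returned blank.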
@webnotes.whitelist(allow_guest=True)
def get_customer_detail(customer_id):
qr="select customer_no,email from tabCustomer where name='"+customer_id+"'"
res=webnotes.conn.sql(qr)
customerobj= {}
for r in res:
customerobj['phone'] = r[0]
customerobj['email'] = r[1]
customerobj['clinic_name'] = ''
customerobj['address'] = ''
customerobj['doctor_name'] = ''
customerobj['city'] = ''
customerobj['pincode'] = ''
customerobj['director_name'] = ''
customerobj['state'] = ''
customerobj['email'] = ''
customerobj['reg_no_clinic'] = ''
customerobj['reg_no_doctor'] = ''
customerobj['website'] = ''
return customerobj
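# Return a sample of serial numbers with item code and description (the barcode argument is not used in the query yet).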
@webnotes.whitelist(allow_guest=True)
def get_item_detail(barcode):
qr="select name,item_code,description from `tabSerial No` limit 5"
res=webnotes.conn.sql(qr)
itemsobj= {}
itemlist = []
for r in res:
itemobj={}
itemobj['barcode'] = r[0]
itemobj['description'] = r[1]
itemobj['details'] = r[2]
itemlist.append(itemobj)
return itemlist
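# Mail an HTML summary of today's Sales Orders grouped by item: one per-territory table
# for each Regional Manager found plus a combined table (recipient addresses are hard-coded).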
@webnotes.whitelist(allow_guest=True)
def send_sales_details():
print "sales details"
from webnotes.utils.email_lib import sendmail
qr="select a.territory,b.item_code,sum(b.qty) as qty,sum(b.export_amount) as amt from `tabSales Order Item` b,`tabSales Order` a where a.name=b.parent group by b.item_code"
res=webnotes.conn.sql(qr)
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Product</td> <td>Quantity</td><td>Total Amount</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSales Order` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
for rr in res:
msg1=''
bb="select ifnull(a.territory,''),ifnull(b.item_code,''),ifnull(sum(b.qty),''),ifnull(sum(b.export_amount),'') from `tabSales Order Item` b,`tabSales Order` a where DATE(a.creation)=CURDATE() and a.name=b.parent and a.territory='"+rr[0]+"' group by b.item_code "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
#print "------------------- region"
#print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
sendmail('gangadhar.k@indictranstech.com', subject='Regional Sales Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
if res1:
sendmail('gangadhar.k@indictranstech.com', subject="sales alert", msg = msg3)
return "done"
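# Mail per-territory and overall Support Ticket statistics (created, closed, open, paid count and paid amount) as an HTML table.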
@webnotes.whitelist(allow_guest=True)
def send_ticket_details():
print "ticket"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Total Tickets Created</td> <td>Total Tickets Closed</td><td>Total Open Tickets</td><td>Total Paid Tickets</td><td>Total Paid Tickets Amount</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSupport Ticket` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="SELECT ifnull(a.territory,''),count(a.name),(select count(a.name) FROM `tabSupport Ticket` a WHERE DATE(a.creation)=CURDATE() and a.territory='"+cstr(rr[0])+"' and a.status='Closed' group by a.territory),(select count(a.name) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' and a.status<>'Closed' group by a.territory),(select count(a.name) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' and a.is_paid='Yes' group by a.territory),(select sum(amount) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' and a.is_paid='Yes' group by a.territory) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' group by a.territory "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
#print "------------------- region"
#print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
sendmail('gangadhar.k@indictranstech.com', subject='Regional Support Ticket Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
if res1:
sendmail('gangadhar.k@indictranstech.com', subject="Support Ticket Alert", msg = msg3)
return "done"
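# Mail today's Sales Order items whose sold rate differs from the price list rate, per territory and overall.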
@webnotes.whitelist(allow_guest=True)
def send_isbpl_details():
print "item sold below pricelist"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Sales Order</td><td>Customer</td><td>Product</td><td>Price List Rate</td><td>Sold Rate</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSales Order` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="select a.territory,a.name,a.customer,b.item_code,b.ref_rate,b.export_rate from `tabSales Order Item` b,`tabSales Order` a where DATE(a.creation)=CURDATE() and a.name=b.parent and b.ref_rate <> b.export_rate and b.ref_rate != 0 and a.territory='"+cstr(rr[0])+"' order by a.name "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
print "------------------- region"
print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
print "res in send mail"
sendmail('gangadhar.k@indictranstech.com', subject='Regional Items Sold Below Price List Rate Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
print msg1
if res1:
sendmail('gangadhar.k@indictranstech.com', subject="Items Sold Below Price List Rate Alert", msg = msg3)
return "done"
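# Mail Opportunities older than 25 days that have not yet been converted to a Quotation.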
@webnotes.whitelist(allow_guest=True)
def send_oppt_details():
print "old oppts"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Employee</td><td>Opportunity</td><td>LEAD/Customer</td><td>Created Before days</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabOpportunity` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="select a.territory,a.owner,a.name,CASE a.enquiry_from WHEN 'Customer' THEN a.customer ELSE a.lead END,DATEDIFF(CURDATE(),DATE(a.creation)) from `tabOpportunity` a where DATEDIFF(CURDATE(),DATE(a.creation))>=25 and status<> 'Quotation' and a.territory='"+rr[0]+"'order by a.owner,a.territory "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
print "------------------- region"
print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
print "res in send mail"
sendmail('gangadhar.k@indictranstech.com', subject='Regional Not Converted Opportunities Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
print msg1
if res1:
sendmail('gangadhar.k@indictranstech.com', subject="Not Converted Opportunities Alert", msg = msg3)
return "done"
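# Mail Sales Orders for which no Sales Invoice has been created yet.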
@webnotes.whitelist(allow_guest=True)
def send_invoice_details():
print "invoice not created"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Employee</td><td>Sales Oder</td><td>Customer ID</td><td>Customer Name</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSales Order` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="select territory,owner,name,customer,customer_name from `tabSales Order` where territory='"+rr[0]+"' and name not in (select distinct(sales_order) from `tabSales Invoice Item` where sales_order is not null) order by territory,owner"
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
print "------------------- region"
print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
print "res in send mail"
sendmail('gangadhar.k@indictranstech.com', subject='Regional Invoices Not Created Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
print msg1
if res1:
sendmail('gangadhar.k@indictranstech.com', subject="Invoices Not Created Alert", msg = msg3)
return "done"
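# Mail assets whose latest AMC/CMC contract expires within the next 15 days.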
@webnotes.whitelist(allow_guest=True)
def send_amccmc_details():
print "amc cmc"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >AMC/CMC Details</td><td>Asset Name </td><td>AMC/CMC Expiring Date</td></tr>"""
end="""</table></body></html>"""
aa="""select b.amc_details,a.item_code,datediff(date(b.expiry_date),CURDATE()), b.start_date,b.expiry_date from `tabAMC Details` b,`tabItem` a where a.name=b.parent and expiry_date in(select max(expiry_date) from `tabAMC Details` where parent=b.parent) and datediff(date(b.expiry_date),CURDATE())<=15"""
res=webnotes.conn.sql(aa)
msg=''
print res
for rr in res:
print rr
print msg
msg=msg+"<tr><td>"+cstr(rr[0])+"</td><td>"+cstr(rr[1])+"</td><td>"+cstr(rr[4])+"</td></tr>"
print msg
msg1=start+""+cstr(msg)+" "+end
print msg1
if res:
sendmail('gangadhar.k@indictranstech.com', subject="AMC/CMC Expiring Alert", msg = msg1)
return "done"
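# Mail Purchase Order items scheduled for delivery today that have no matching Purchase Receipt.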
@webnotes.whitelist(allow_guest=True)
def send_todays_material_details():
#print "todays_material_"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Purchase Order</td><td>Product </td><td>Quantity</td></tr>"""
end="""</table></body></html>"""
aa="""select a.name,b.item_code,b.schedule_date,b.qty from `tabPurchase Order`a,`tabPurchase Order Item`b where a.name not in(select d.prevdoc_docname from `tabPurchase Receipt`c,`tabPurchase Receipt Item`d where d.schedule_date=CURDATE() and d.parent=c.name) and b.schedule_date=CURDATE() and b.parent=a.name"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
#print rr
#print msg
msg=msg+"<tr><td>"+cstr(rr[0])+"</td><td>"+cstr(rr[1])+"</td><td>"+cstr(rr[3])+"</td></tr>"
#print msg
msg1=start+""+cstr(msg)+" "+end
if res:
sendmail('gangadhar.k@indictranstech.com', subject="Todays Expected Material Not Received Alert", msg = msg1)
return "done"
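# Mail items whose actual warehouse quantity is at or below their re-order level.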
@webnotes.whitelist(allow_guest=True)
def send_low_stock_details():
print "low stock"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Product</td><td>Warehouse </td><td>Actual Quantity in Warehouse</td><td>Minimum Quantity level</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct a.item_code,a.warehouse,a.actual_qty,b.re_order_level from `tabBin`a,`tabItem`b where a.actual_qty<=b.re_order_level and b.re_order_level!=0"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
#print rr
#print msg
msg=msg+"<tr><td>"+cstr(rr[0])+"</td><td>"+cstr(rr[1])+"</td><td>"+cstr(rr[2])+"</td><td>"+cstr(rr[3])+"</td></tr>"
#print msg
msg1=start+""+cstr(msg)+" "+end
if res:
sendmail('gangadhar.k@indictranstech.com', subject="Minimum Stock Level Reached Alert", msg = msg1)
return "done"
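# The endpoints below serve the installation mobile client; apart from GetEmployee they currently return hard-coded '<status>^<payload>' placeholder responses.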
@webnotes.whitelist(allow_guest=True)
def GetVerify(verificationCode):
return '0^232322422'
@webnotes.whitelist(allow_guest=True)
def GetEmployee(sessionCode,empID):
aa="select employee_name from tabEmployee where name='"+empID+"'"
res=webnotes.conn.sql(aa)
if res:
return '0^'+res[0][0]
else:
return "Employee not found for employee ID "+empID
@webnotes.whitelist(allow_guest=True)
def GetProducts(sessionCode,instType,customerID):
if sessionCode:
return '0^53424423423'
else:
return "1^invalid session code"
@webnotes.whitelist(allow_guest=True)
def GetInstDetails(sessionCode,instType,prodBarCode):
if sessionCode:
return '0^shree clinic^deccan pune^Dr.Metgud^pune^411004^Dr. Sanjay Joshi^Maharashtra^sanjayjoshi@gmail.com^9822012345^www.sanjayjoshi.com^MH/REG/CL/21232^MH/REG/DR/212323^IN00004^ScanDoc^IOF-00003^2242423~3423424545~553534434~353r445345~3434434'
else:
return "1^invalid session code"
@webnotes.whitelist(allow_guest=True)
def SetRegister(sessionCode,instType,customerID,prodBarCode,empID,prodName,prodVersion,iofNumber,instReportNumber,contactPersonsOnSite,mateBarCode):
if sessionCode:
return '0^IN00004'
else:
return "1^invalid session code"
|
agpl-3.0
| -3,199,338,655,356,066,000
| 42.427785
| 684
| 0.564662
| false
| 3.118684
| false
| false
| false
|
Lydwen/Mr.Statamutation
|
Mr.Statapython/statapython/__main__.py
|
1
|
4758
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import webbrowser
from .statareport import Reporting
from .stataspoon import MutationsTester
from .statautils import Directory, Logger
from .stataxml import ConfigParser
""" Report filename """
REPORT_FILENAME = 'statam_report.html'
def main(args):
"""
Main function.
:param args: command-line arguments
"""
if args.project:
# Go to project directory
os.chdir(os.path.abspath(args.project))
# Check if Spoon need to be applied
if not args.disable_spoon:
# Clean the report directory
Logger.log('Pre-cleaning report directory "%s"' % args.report_directory, True)
pre_clean(args.report_directory)
# Load the configuration
Logger.log('Load mutations configuration "%s"' % args.mutations_config, True)
mutations_config = ConfigParser.parse(args.mutations_config)
# Create mutator tester, and execute original tests
mutator = MutationsTester(args.tests_directory, args.report_directory, not args.keep_temp)
mutator.process(args.original)
# Get all mutations
mutations = mutations_config['mutations']['mutation']
if not isinstance(mutations, (list, tuple)): mutations = (mutations,) # Bind to list
# Execute every mutations
for mutation in mutations:
mutator.process(mutation['name'],
mutation['processors'].get('processor', ()) if mutation.get('processors', None) else (),
mutation.get('selector', ()))
# Check if report generation is enabled
if not args.disable_report:
# Compute reporting
report_file = os.path.join(args.report_directory, REPORT_FILENAME)
report_abspath = os.path.abspath(report_file)
Logger.log('=============== Generating report ===============', True)
Reporting(args.report_directory, args.original).report(report_file)
Logger.log('Report accessible at: %s' % report_abspath)
# Open in browser if asked to
if args.open_browser:
Logger.log('Opening report file in browser...')
webbrowser.open(report_abspath)
def pre_clean(directory):
"""
Pre-clean the project.
:param directory: report directory
:return:
"""
# Clean directory
try:
Directory.delete(directory)
except:
Logger.log('[Warning] Error on cleaning report directory ("%s")' % directory)
# Create a new one
try:
Directory.create(directory)
except:
Logger.log('[Warning] Error on creating report directory ("%s")' % directory)
def get_parser():
"""
Initialize command-line parser with default and optional arguments.
:return: parser
"""
# Enable command-line parsing
parser = argparse.ArgumentParser()
# Optional arguments
parser.add_argument('-p', '--project',
help='project main directory')
parser.add_argument('-m', '--mutations-config',
help='mutations configuration file',
default='./statamutations.xml')
parser.add_argument('-r', '--report-directory',
help='report output directory (generated report)',
default='./target/statam-report')
parser.add_argument('-t', '--tests-directory',
help='tests directory (output when tests are executed)',
default='./target/surefire-reports')
parser.add_argument('-g', '--original',
help='original (not mutated) tests directory',
default='_original_')
parser.add_argument('-k', '--keep-temp',
help='enable/disable temporary file cleaning',
action='store_true')
parser.add_argument('-o', '--open-browser',
help='open the report file in the default browser after generation',
action='store_true')
parser.add_argument('--disable-spoon',
help='disable Spoon (only the report will be computed)',
action='store_true')
parser.add_argument('--disable-report',
help='disable report generation (only Spoon will be applied)',
action='store_true')
return parser
# Main execution
if __name__ == "__main__":
# Enable logging
Logger.ENABLED = True
Logger.log('=============== <3 - Welcome in Mr.Statamutation project - <3 ===============', True)
# Start the main
sys.exit(main(
# Parse command-line args
get_parser().parse_args()
))
|
mit
| -4,865,774,189,952,201,000
| 32.744681
| 116
| 0.592266
| false
| 4.527117
| true
| false
| false
|
luisza/async_notifications
|
async_notifications/mail_utils.py
|
1
|
1362
|
# encoding: utf-8
'''
Free as freedom will be 26/9/2016
@author: luisza
'''
from __future__ import unicode_literals
from .settings import (NOTIFICATION_USER_MODEL, USER_LOOKUP_FIELDS,
NOTIFICATION_GROUP_MODEL, GROUP_LOOKUP_FIELDS)
from .utils import extract_emails, get_model
#from django.contrib.auth.models import User, Group
User = get_model(NOTIFICATION_USER_MODEL)
Group = get_model(NOTIFICATION_GROUP_MODEL)
def get_mails_from_group(group_name):
name = group_name.replace("@group", "").replace("__", " ")
group = Group.objects.get(**{GROUP_LOOKUP_FIELDS['group_lookup']: name})
email = None
# check if group has email (util with mail list approach)
name_field = GROUP_LOOKUP_FIELDS['email']
if name_field:
if hasattr(group, name_field):
email = getattr(group, name_field)
if email:
return [email]
if 'group_lookup' in USER_LOOKUP_FIELDS:
users = User.objects.filter(**{USER_LOOKUP_FIELDS['group_lookup']: name})
return [u.email for u in users]
return []
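# Expand a free-text recipient string into a set of e-mail addresses, resolving '@group' entries to the group's e-mail or to its member users.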
def get_all_emails(text):
if text is None:
return []
mails = extract_emails(text)
gmails = []
for mail in mails:
if "@group" in mail:
mails.remove(mail)
gmails += get_mails_from_group(mail)
mails += gmails
return set(mails)
|
gpl-2.0
| 476,110,698,621,409,200
| 27.375
| 81
| 0.637298
| false
| 3.510309
| false
| false
| false
|
greggyNapalm/lunaport_server
|
lunaport_server/plugg_views/HookRegistration.py
|
1
|
5396
|
# -*- encoding: utf-8 -*-
"""
lunaport.plugg_views.hook_registration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Class-based view for hook_registration resource.
hook_registration - m2m connection case with hook. Rule to starte test.
"""
import json
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from flask import jsonify, request, Response, url_for, session
from Base import BaseView
from .. dao.exceptions import StorageError
from .. dao.hook_registration import RDBMS
from .. domain.hook_registration import HookRegistrationBuilder, HookRegistrationAdaptor
class HookRegistration(BaseView):
str_params = [
'case_id',
'hook_id',
'descr',
'cfg',
]
dao = RDBMS
def get(self, hook_registration_id=None):
if hook_registration_id is None: # walk through all registrations
q = self.cmpl_query()
try:
h_regs, per_page, next_page, prev_page = self.dao.get_many(**q)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
except ValueError as e:
msg = {
'error_type': 'Business logic layer error',
'error_text': str(e),
}
return jsonify(msg), 500
if not h_regs:
return Response(status=404)
body = json.dumps(
[HookRegistrationAdaptor.to_resp(r, jsonify=False) for r in h_regs])
hdrs = {
'Content-Type': 'application/json; charset=utf-8',
'Link': self.cmpl_link_hdr(request, per_page, next_page,
prev_page),
}
return Response(response=body, status=200,
headers=hdrs)
else: # try to get single *hook_registration* entrie by id
try:
h_regs = self.dao.get_single(hook_registration_id=hook_registration_id)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
if not h_regs:
return Response(status=404)
hdrs = {'Content-Type': 'application/json; charset=utf-8'}
return Response(response=HookRegistrationAdaptor.to_resp(h_regs), status=200,
headers=hdrs)
def post(self):
try:
hook_registration = HookRegistrationBuilder.from_Flask_req(request, session)
except ValueError as e:
msg = {
'error_type': 'Malformed body attributes',
'error_text': str(e),
}
return jsonify(msg), 422
try:
hook_registration.id = self.dao.insert(hook_registration)
hook_registration = self.dao.get_single(hook_registration_id=hook_registration.id)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
except ValueError as e:
msg = {
'error_type': 'Malformed body attributes',
'error_text': str(e),
}
return jsonify(msg), 409
res_location = '{}{}'.format(url_for('hook_registration'), hook_registration.id)
return Response(response=HookRegistrationAdaptor.to_resp(hook_registration), status=201,
headers={
'Location': res_location,
'Content-Type': 'application/json; charset=utf-8'
})
def patch(self, hook_registration_id):
diff = request.json
if not diff:
msg = {
'error_type': 'Malformed body attributes',
'error_text': 'Can\'t deserialize json document',
}
return jsonify(msg), 422
try:
hook_registration = self.dao.update_by_id(hook_registration_id, diff)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
res_location = '{}{}'.format(url_for('hook_registration'), hook_registration.id)
return Response(response=HookRegistrationAdaptor.to_resp(hook_registration), status=200,
headers={
'Location': res_location,
'Content-Type': 'application/json; charset=utf-8'
})
def delete(self, hook_registration_id):
try:
self.dao.delete(hook_registration_id)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
except ValueError as e:
msg = {
'error_type': 'Malformed user provided data',
'error_text': str(e),
}
return jsonify(msg), 422
return Response(status=200)
|
apache-2.0
| -1,302,585,851,489,007,900
| 34.038961
| 96
| 0.509451
| false
| 4.478008
| false
| false
| false
|
redhat-openstack/glance
|
glance/api/v2/metadef_namespaces.py
|
1
|
28184
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import webob.exc
from wsme.rest.json import fromjson
from wsme.rest.json import tojson
from glance.api import policy
from glance.api.v2.model.metadef_namespace import Namespace
from glance.api.v2.model.metadef_namespace import Namespaces
from glance.api.v2.model.metadef_object import MetadefObject
from glance.api.v2.model.metadef_property_type import PropertyType
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance.common import wsme_utils
import glance.db
import glance.gateway
from glance import i18n
import glance.notifier
from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
import glance.schema
LOG = logging.getLogger(__name__)
_LE = i18n._LE
_LW = i18n._LW
_LI = i18n._LI
CONF = cfg.CONF
class NamespaceController(object):
def __init__(self, db_api=None, policy_enforcer=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.gateway = glance.gateway.Gateway(db_api=self.db_api,
policy_enforcer=self.policy)
self.ns_schema_link = '/v2/schemas/metadefs/namespace'
self.obj_schema_link = '/v2/schemas/metadefs/object'
def index(self, req, marker=None, limit=None, sort_key='created_at',
sort_dir='desc', filters=None):
try:
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
# Get namespace id
if marker:
namespace_obj = ns_repo.get(marker)
marker = namespace_obj.namespace_id
database_ns_list = ns_repo.list(
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir, filters=filters)
for db_namespace in database_ns_list:
# Get resource type associations
filters = dict()
filters['namespace'] = db_namespace.namespace
rs_repo = (
self.gateway.get_metadef_resource_type_repo(req.context))
repo_rs_type_list = rs_repo.list(filters=filters)
resource_type_list = [ResourceTypeAssociation.to_wsme_model(
resource_type) for resource_type in repo_rs_type_list]
if resource_type_list:
db_namespace.resource_type_associations = (
resource_type_list)
namespace_list = [Namespace.to_wsme_model(
db_namespace,
get_namespace_href(db_namespace),
self.ns_schema_link) for db_namespace in database_ns_list]
namespaces = Namespaces()
namespaces.namespaces = namespace_list
if len(namespace_list) != 0 and len(namespace_list) == limit:
namespaces.next = namespace_list[-1].namespace
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return namespaces
@utils.mutating
def create(self, req, namespace):
try:
namespace_created = False
# Create Namespace
ns_factory = self.gateway.get_metadef_namespace_factory(
req.context)
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
new_namespace = ns_factory.new_namespace(**namespace.to_dict())
ns_repo.add(new_namespace)
namespace_created = True
# Create Resource Types
rs_factory = (
self.gateway.get_metadef_resource_type_factory(req.context))
rs_repo = self.gateway.get_metadef_resource_type_repo(req.context)
if namespace.resource_type_associations:
for resource_type in namespace.resource_type_associations:
new_resource = rs_factory.new_resource_type(
namespace=namespace.namespace,
**resource_type.to_dict())
rs_repo.add(new_resource)
# Create Objects
object_factory = self.gateway.get_metadef_object_factory(
req.context)
object_repo = self.gateway.get_metadef_object_repo(req.context)
if namespace.objects:
for metadata_object in namespace.objects:
new_meta_object = object_factory.new_object(
namespace=namespace.namespace,
**metadata_object.to_dict())
object_repo.add(new_meta_object)
# Create Namespace Properties
prop_factory = (
self.gateway.get_metadef_property_factory(req.context))
prop_repo = self.gateway.get_metadef_property_repo(req.context)
if namespace.properties:
for (name, value) in namespace.properties.items():
new_property_type = (
prop_factory.new_namespace_property(
namespace=namespace.namespace,
**self._to_property_dict(name, value)
))
prop_repo.add(new_property_type)
except exception.Forbidden as e:
self._cleanup_namespace(ns_repo, namespace, namespace_created)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
self._cleanup_namespace(ns_repo, namespace, namespace_created)
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
self._cleanup_namespace(ns_repo, namespace, namespace_created)
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
# Return the user namespace as we don't expose the id to user
new_namespace.properties = namespace.properties
new_namespace.objects = namespace.objects
new_namespace.resource_type_associations = (
namespace.resource_type_associations)
return Namespace.to_wsme_model(new_namespace,
get_namespace_href(new_namespace),
self.ns_schema_link)
def _to_property_dict(self, name, value):
# Convert the model PropertyTypes dict to a JSON string
db_property_type_dict = dict()
db_property_type_dict['schema'] = tojson(PropertyType, value)
db_property_type_dict['name'] = name
return db_property_type_dict
def _cleanup_namespace(self, namespace_repo, namespace, namespace_created):
if namespace_created:
try:
namespace_obj = namespace_repo.get(namespace.namespace)
namespace_obj.delete()
namespace_repo.remove(namespace_obj)
msg = ("Cleaned up namespace %(namespace)s "
% {'namespace': namespace.namespace})
LOG.debug(msg)
except exception:
msg = (_LE("Failed to delete namespace %(namespace)s ") %
{'namespace': namespace.namespace})
LOG.error(msg)
def show(self, req, namespace, filters=None):
try:
# Get namespace
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
namespace_obj = ns_repo.get(namespace)
namespace_detail = Namespace.to_wsme_model(
namespace_obj,
get_namespace_href(namespace_obj),
self.ns_schema_link)
ns_filters = dict()
ns_filters['namespace'] = namespace
# Get objects
object_repo = self.gateway.get_metadef_object_repo(req.context)
db_metaobject_list = object_repo.list(filters=ns_filters)
object_list = [MetadefObject.to_wsme_model(
db_metaobject,
get_object_href(namespace, db_metaobject),
self.obj_schema_link) for db_metaobject in db_metaobject_list]
if object_list:
namespace_detail.objects = object_list
# Get resource type associations
rs_repo = self.gateway.get_metadef_resource_type_repo(req.context)
db_resource_type_list = rs_repo.list(filters=ns_filters)
resource_type_list = [ResourceTypeAssociation.to_wsme_model(
resource_type) for resource_type in db_resource_type_list]
if resource_type_list:
namespace_detail.resource_type_associations = (
resource_type_list)
# Get properties
prop_repo = self.gateway.get_metadef_property_repo(req.context)
db_properties = prop_repo.list(filters=ns_filters)
property_list = Namespace.to_model_properties(db_properties)
if property_list:
namespace_detail.properties = property_list
if filters and filters['resource_type']:
namespace_detail = self._prefix_property_name(
namespace_detail, filters['resource_type'])
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return namespace_detail
def update(self, req, user_ns, namespace):
namespace_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
ns_obj = namespace_repo.get(namespace)
ns_obj.namespace = wsme_utils._get_value(user_ns.namespace)
ns_obj.display_name = wsme_utils._get_value(user_ns.display_name)
ns_obj.description = wsme_utils._get_value(user_ns.description)
# Following optional fields will default to same values as in
# create namespace if not specified
ns_obj.visibility = (
wsme_utils._get_value(user_ns.visibility) or 'private')
ns_obj.protected = (
wsme_utils._get_value(user_ns.protected) or False)
ns_obj.owner = (
wsme_utils._get_value(user_ns.owner) or req.context.owner)
updated_namespace = namespace_repo.save(ns_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return Namespace.to_wsme_model(updated_namespace,
get_namespace_href(updated_namespace),
self.ns_schema_link)
def delete(self, req, namespace):
namespace_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
namespace_obj = namespace_repo.get(namespace)
namespace_obj.delete()
namespace_repo.remove(namespace_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def delete_objects(self, req, namespace):
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
namespace_obj = ns_repo.get(namespace)
namespace_obj.delete()
ns_repo.remove_objects(namespace_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def delete_properties(self, req, namespace):
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
namespace_obj = ns_repo.get(namespace)
namespace_obj.delete()
ns_repo.remove_properties(namespace_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def _prefix_property_name(self, namespace_detail, user_resource_type):
prefix = None
if user_resource_type and namespace_detail.resource_type_associations:
for resource_type in namespace_detail.resource_type_associations:
if resource_type.name == user_resource_type:
prefix = resource_type.prefix
break
if prefix:
if namespace_detail.properties:
new_property_dict = dict()
for (key, value) in namespace_detail.properties.items():
new_property_dict[prefix + key] = value
namespace_detail.properties = new_property_dict
if namespace_detail.objects:
for object in namespace_detail.objects:
new_object_property_dict = dict()
for (key, value) in object.properties.items():
new_object_property_dict[prefix + key] = value
object.properties = new_object_property_dict
if object.required and len(object.required) > 0:
required = [prefix + name for name in object.required]
object.required = required
return namespace_detail
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ['self', 'schema', 'created_at', 'updated_at']
def __init__(self, schema=None):
super(RequestDeserializer, self).__init__()
self.schema = schema or get_schema()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, image):
for key in cls._disallowed_properties:
if key in image:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(explanation=msg)
def index(self, request):
params = request.params.copy()
limit = params.pop('limit', None)
marker = params.pop('marker', None)
sort_dir = params.pop('sort_dir', 'desc')
if limit is None:
limit = CONF.limit_param_default
limit = min(CONF.api_limit_max, int(limit))
query_params = {
'sort_key': params.pop('sort_key', 'created_at'),
'sort_dir': self._validate_sort_dir(sort_dir),
'filters': self._get_filters(params)
}
if marker is not None:
query_params['marker'] = marker
if limit is not None:
query_params['limit'] = self._validate_limit(limit)
return query_params
def _validate_sort_dir(self, sort_dir):
if sort_dir not in ['asc', 'desc']:
msg = _('Invalid sort direction: %s') % sort_dir
raise webob.exc.HTTPBadRequest(explanation=msg)
return sort_dir
def _get_filters(self, filters):
visibility = filters.get('visibility')
if visibility:
if visibility not in ['public', 'private']:
msg = _('Invalid visibility value: %s') % visibility
raise webob.exc.HTTPBadRequest(explanation=msg)
return filters
def _validate_limit(self, limit):
try:
limit = int(limit)
except ValueError:
msg = _("limit param must be an integer")
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _("limit param must be positive")
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def show(self, request):
params = request.params.copy()
query_params = {
'filters': self._get_filters(params)
}
return query_params
def create(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
namespace = fromjson(Namespace, body)
return dict(namespace=namespace)
def update(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
namespace = fromjson(Namespace, body)
return dict(user_ns=namespace)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema
def create(self, response, namespace):
ns_json = tojson(Namespace, namespace)
response = self.__render(ns_json, response, 201)
response.location = get_namespace_href(namespace)
def show(self, response, namespace):
ns_json = tojson(Namespace, namespace)
response = self.__render(ns_json, response)
def index(self, response, result):
params = dict(response.request.params)
params.pop('marker', None)
query = urlparse.urlencode(params)
result.first = "/v2/metadefs/namespaces"
result.schema = "/v2/schemas/metadefs/namespaces"
if query:
result.first = '%s?%s' % (result.first, query)
if result.next:
params['marker'] = result.next
next_query = urlparse.urlencode(params)
result.next = '/v2/metadefs/namespaces?%s' % next_query
ns_json = tojson(Namespaces, result)
response = self.__render(ns_json, response)
def update(self, response, namespace):
ns_json = tojson(Namespace, namespace)
response = self.__render(ns_json, response, 200)
def delete(self, response, result):
response.status_int = 204
def delete_objects(self, response, result):
response.status_int = 204
def delete_properties(self, response, result):
response.status_int = 204
def __render(self, json_data, response, response_status=None):
body = json.dumps(json_data, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
if response_status:
response.status_int = response_status
return response
def _get_base_definitions():
return get_schema_definitions()
def get_schema_definitions():
return {
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [
{"$ref": "#/definitions/positiveInteger"},
{"default": 0}
]
},
"stringArray": {
"type": "array",
"items": {"type": "string"},
# "minItems": 1,
"uniqueItems": True
},
"property": {
"type": "object",
"additionalProperties": {
"type": "object",
"required": ["title", "type"],
"properties": {
"name": {
"type": "string"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"array",
"boolean",
"integer",
"number",
"object",
"string",
None
]
},
"required": {
"$ref": "#/definitions/stringArray"
},
"minimum": {
"type": "number"
},
"maximum": {
"type": "number"
},
"maxLength": {
"$ref": "#/definitions/positiveInteger"
},
"minLength": {
"$ref": "#/definitions/positiveIntegerDefault0"
},
"pattern": {
"type": "string",
"format": "regex"
},
"enum": {
"type": "array"
},
"readonly": {
"type": "boolean"
},
"default": {},
"items": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": [
"array",
"boolean",
"integer",
"number",
"object",
"string",
None
]
},
"enum": {
"type": "array"
}
}
},
"maxItems": {
"$ref": "#/definitions/positiveInteger"
},
"minItems": {
"$ref": "#/definitions/positiveIntegerDefault0"
},
"uniqueItems": {
"type": "boolean",
"default": False
},
"additionalItems": {
"type": "boolean"
},
}
}
}
}
def _get_base_properties():
return {
"namespace": {
"type": "string",
"description": _("The unique namespace text."),
"maxLength": 80,
},
"display_name": {
"type": "string",
"description": _("The user friendly name for the namespace. Used "
"by UI if available."),
"maxLength": 80,
},
"description": {
"type": "string",
"description": _("Provides a user friendly description of the "
"namespace."),
"maxLength": 500,
},
"visibility": {
"type": "string",
"description": _("Scope of namespace accessibility."),
"enum": ["public", "private"],
},
"protected": {
"type": "boolean",
"description": _("If true, namespace will not be deletable."),
},
"owner": {
"type": "string",
"description": _("Owner of the namespace."),
"maxLength": 255,
},
"created_at": {
"type": "string",
"description": _("Date and time of namespace creation"
" (READ-ONLY)"),
"format": "date-time"
},
"updated_at": {
"type": "string",
"description": _("Date and time of the last namespace modification"
" (READ-ONLY)"),
"format": "date-time"
},
"schema": {
"type": "string"
},
"self": {
"type": "string"
},
"resource_type_associations": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"prefix": {
"type": "string"
},
"properties_target": {
"type": "string"
}
}
}
},
"properties": {
"$ref": "#/definitions/property"
},
"objects": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"description": {
"type": "string"
},
"required": {
"$ref": "#/definitions/stringArray"
},
"properties": {
"$ref": "#/definitions/property"
},
}
}
}
}
def get_schema():
properties = _get_base_properties()
definitions = _get_base_definitions()
mandatory_attrs = Namespace.get_mandatory_attrs()
schema = glance.schema.Schema(
'namespace',
properties,
required=mandatory_attrs,
definitions=definitions
)
return schema
def get_collection_schema():
namespace_schema = get_schema()
return glance.schema.CollectionSchema('namespaces', namespace_schema)
def get_namespace_href(namespace):
base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace
return base_href
def get_object_href(namespace_name, metadef_object):
base_href = ('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadef_object.name))
return base_href
def create_resource():
"""Namespaces resource factory method"""
schema = get_schema()
deserializer = RequestDeserializer(schema)
serializer = ResponseSerializer(schema)
controller = NamespaceController()
return wsgi.Resource(controller, deserializer, serializer)
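# --- Hedged illustration (editor's sketch, not part of the original module) ---
# A minimal look at how the "positiveInteger" and "stringArray" definitions
# above behave when checked with the standalone ``jsonschema`` package. Glance
# itself wires these definitions through glance.schema.Schema; the fragment
# below only mirrors the relevant pieces for demonstration.
if __name__ == '__main__':
    import jsonschema

    _demo_schema = {
        "definitions": {
            "positiveInteger": {"type": "integer", "minimum": 0},
            "stringArray": {
                "type": "array",
                "items": {"type": "string"},
                "uniqueItems": True,
            },
        },
        "type": "object",
        "properties": {
            "maxLength": {"$ref": "#/definitions/positiveInteger"},
            "required": {"$ref": "#/definitions/stringArray"},
        },
    }
    # A well-formed property description passes validation.
    jsonschema.validate({"maxLength": 80, "required": ["title"]}, _demo_schema)
    # A negative maxLength violates "minimum": 0 and is rejected.
    try:
        jsonschema.validate({"maxLength": -1}, _demo_schema)
    except jsonschema.ValidationError as exc:
        print("rejected as expected: %s" % exc.message)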
|
apache-2.0
| 8,594,759,101,912,330,000
| 36.983827
| 79
| 0.532749
| false
| 4.582764
| false
| false
| false
|
taufique71/widespace-job-manager
|
graph.py
|
1
|
1340
|
class Graph:
def __init__(self, config):
self.config = config
self.vertices = list(config["jobs"].keys())
self.edges = []
self.outdegree = {}
for key, value in config["dependencies"].items():
for val in value:
self.edges.append( (key, val) )
self.update_outdegree()
return
def get_vertices(self):
return self.vertices
def get_edges(self):
return self.edges
def get_outdegree(self):
return self.outdegree
def get_zero_outdegree_vertices(self):
zero_outdegree_vertices = []
for v in self.vertices:
if self.outdegree[v] == 0:
zero_outdegree_vertices.append(v)
return zero_outdegree_vertices
def update_outdegree(self):
self.outdegree = {}
for v in self.vertices:
self.outdegree[v] = 0
for e in self.edges:
self.outdegree[e[0]] = self.outdegree[e[0]]+1
return
def remove_edge(self, edge):
self.edges.remove( edge )
self.update_outdegree()
return
def remove_vertex(self, vertex):
self.vertices.remove(vertex)
for e in list(self.edges):
if e[1] == vertex:
self.remove_edge(e)
self.update_outdegree()
return
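# --- Hedged usage sketch (editor's example; the config below is illustrative) ---
# An edge (a, b) means "a depends on b", so a vertex with outdegree 0 has no
# unmet dependencies. Peeling such vertices off repeatedly gives a simple
# Kahn-style topological ordering of the jobs.
if __name__ == "__main__":
    config = {
        "jobs": {"build": {}, "test": {}, "deploy": {}},
        "dependencies": {"test": ["build"], "deploy": ["test"]},
    }
    g = Graph(config)
    order = []
    while g.get_vertices():
        ready = g.get_zero_outdegree_vertices()
        if not ready:
            raise ValueError("dependency cycle detected")
        for job in ready:
            order.append(job)
            g.remove_vertex(job)
    print(order)  # expected: ['build', 'test', 'deploy']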
|
mit
| 6,338,988,099,701,754,000
| 26.916667
| 57
| 0.551493
| false
| 3.929619
| false
| false
| false
|
mhgp/convert_txts2lyx4shosetsukaninaro
|
cnv.py
|
1
|
5770
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
import io
import re
import codecs
# Directory name of the current working directory
DIRECTORY_NAME = os.path.split(os.getcwd())[1]
#
def write_base_lyx_code(wf):
wf.write("""#LyX 2.2 created this file. For more info see http://www.lyx.org/
\\lyxformat 508
\\begin_document
\\begin_header
\\save_transient_properties true
\\origin unavailable
\\textclass jsbook
\\begin_preamble
\\usepackage{pxrubrica}
\\usepackage[dvipdfmx, bookmarkstype=toc, colorlinks=true, urlcolor=black, linkcolor=blue, citecolor=black, linktocpage=true, bookmarks=true]{hyperref}
\\usepackage{pxjahyper}
\\end_preamble
\\use_default_options true
\\maintain_unincluded_children false
\\language japanese
\\language_package default
\\inputencoding utf8-platex
\\fontencoding global
\\font_roman "default" "default"
\\font_sans "default" "default"
\\font_typewriter "default" "default"
\\font_math "auto" "auto"
\\font_default_family default
\\use_non_tex_fonts false
\\font_sc false
\\font_osf false
\\font_sf_scale 100 100
\\font_tt_scale 100 100
\\graphics default
\\default_output_format default
\\output_sync 0
\\bibtex_command default
\\index_command default
\\paperfontsize default
\\spacing single
\\use_hyperref false
\\pdf_bookmarks true
\\pdf_bookmarksnumbered true
\\pdf_bookmarksopen false
\\pdf_bookmarksopenlevel 1
\\pdf_breaklinks false
\\pdf_pdfborder false
\\pdf_colorlinks false
\\pdf_backref false
\\pdf_pdfusetitle true
\\papersize default
\\use_geometry false
\\use_package amsmath 1
\\use_package amssymb 1
\\use_package cancel 1
\\use_package esint 1
\\use_package mathdots 1
\\use_package mathtools 1
\\use_package mhchem 1
\\use_package stackrel 1
\\use_package stmaryrd 1
\\use_package undertilde 1
\\cite_engine basic
\\cite_engine_type default
\\biblio_style plain
\\use_bibtopic false
\\use_indices false
\\paperorientation portrait
\\suppress_date false
\\justification true
\\use_refstyle 1
\\index Index
\\shortcut idx
\\color #008000
\\end_index
\\secnumdepth -2
\\tocdepth 2
\\paragraph_separation indent
\\paragraph_indentation default
\\quotes_language english
\\papercolumns 1
\\papersides 1
\\paperpagestyle default
\\tracking_changes false
\\output_changes false
\\html_math_output 0
\\html_css_as_file 0
\\html_be_strict false
\\end_header
\\begin_body\n""")
wf.write("\\begin_layout Title\n" + DIRECTORY_NAME + "\n\\end_layout\n")
wf.write("""\\begin_layout Standard
\\begin_inset CommandInset toc
LatexCommand tableofcontents
\\end_inset
\\end_layout\n\n""")
write_body(wf)
wf.write("""\\end_body
\\end_document""")
# Build the document body
def write_body(wf):
count = 0
while True:
count += 1
path = DIRECTORY_NAME + "-" + str(count) + ".txt";
if not os.path.exists(path):
break
txt2lyx(wf, path)
# Write a horizontal rule
def write_horizon(wf):
wf.write("""\\begin_layout Standard
\\begin_inset CommandInset line
LatexCommand rule
offset "0.5ex"
width "100col%"
height "1pt"
\\end_inset
\\end_layout\n""")
# Write a ruby (furigana) annotation
def write_ruby(wf, body, ruby):
wf.write("""\\begin_inset ERT
status open
\\begin_layout Plain Layout
\\backslash\n""")
wf.write("ruby[g]{" + body + "}{" + ruby + "}\n")
wf.write("""\\end_layout
\\end_inset""")
#
def write_line(wf, line):
wf.write("%s\n"%line)
#
def write_text(wf, line, bl_count):
# Handle blank lines
if (not line) or re.match(r"^[\s\u3000]+$", line):
bl_count += 1
return bl_count
if bl_count > 0:
wf.write("\\begin_layout Standard\n")
for i in range(0, bl_count):
wf.write("\\begin_inset VSpace defskip\n")
wf.write("\\end_inset\n")
wf.write("\\end_layout\n")
bl_count = 0
# Create a paragraph
if line.startswith(' '):
#-- Paragraph (indented)
wf.write("\\begin_layout Standard\n")
write_line(wf, line[1:])
wf.write("\\end_layout\n")
else:
#-- Paragraph (not indented)
wf.write("\\begin_layout Standard\n\\noindent\n")
write_line(wf, line)
wf.write("\\end_layout\n")
wf.write("\n")
return bl_count
#
def txt2lyx(wf, path):
line_num = 0
with codecs.open(path, 'r', encoding='utf-8') as f:
lines = re.split('\r\n|\r|\n', f.read())
preface_end_line = 0
for i,line in enumerate(lines):
if line == "********************************************":
preface_end_line = i
break
#Chapter Title
if preface_end_line > 0:
line_num = preface_end_line + 1
wf.write("\\begin_layout Chapter\n")
wf.write("%s\n"%lines[line_num])
wf.write("\\end_layout\n")
wf.write("\n")
# Preface
bl_count = 0
for line_num in range(0, preface_end_line):
line = lines[line_num]
bl_count = write_text(wf, line, bl_count)
if preface_end_line > 0:
write_horizon(wf)
# Body text and afterword
bl_count = 0
is_start = True
for line in lines[preface_end_line + 2:]:
# Afterword
if line == "************************************************":
bl_count = 0
write_horizon(wf)
continue
# Body text
bl_count = write_text(wf, line, bl_count)
if is_start:
if bl_count > 0:
bl_count = 0
else:
is_start = False
# main
with io.open(DIRECTORY_NAME + '.lyx', mode='w', encoding='utf-8', newline='\n') as f:
write_base_lyx_code(f)
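# --- Hedged note on the expected input layout (editor's addition) ---
# txt2lyx() assumes each chapter file "<directory name>-<n>.txt" looks like:
#
#   (optional preface lines)
#   <row of asterisks>        <- shorter marker: end of the preface
#   Chapter title line
#   body text; a line starting with an ideographic space (U+3000) is indented,
#   blank lines become vertical skips
#   <longer row of asterisks> <- start of the afterword, rendered after a rule
#   afterword lines
#
# Files are read in order (-1.txt, -2.txt, ...) until the first missing file.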
|
mit
| 8,881,267,687,739,330,000
| 24.156951
| 151
| 0.604278
| false
| 3.068928
| false
| false
| false
|
moyaproject/moya
|
moya/namespaces.py
|
1
|
3357
|
from __future__ import unicode_literals
"""XML namespaces"""
admin = "http://moyaproject.com/admin"
auth = "http://moyaproject.com/auth"
blog = "http://moyaproject.com/blog"
comments = "http://moyaproject.com/comments"
db = "http://moyaproject.com/db"
default = "http://moyaproject.com"
email = "http://moyaproject.com/email"
feedback = "http://moyaproject.com/feedback"
forms = "http://moyaproject.com/forms"
fs = "http://moyaproject.com/fs"
html = "http://moyaproject.com/html"
image = "http://moyaproject.com/image"
jsonrpc = "http://moyaproject.com/jsonrpc"
let = "http://moyaproject.com/let"
links = "http://moyaproject.com/links"
preflight = "http://moyaproject.com/preflight"
recaptcha = "http://moyaproject.com/recaptcha"
run = default
soup = "http://moyaproject.com/soup"
tables = "http://moyaproject.com/tables"
test = "http://moyaproject.com/test"
thumbnail = "http://moyaproject.com/thumbnail"
widgets = "http://moyaproject.com/widgets"
wysihtml5 = "http://moyaproject.com/wysihtml5"
namespace_docs = {
"http://moyaproject.com/admin": """
Tags defined in the [link admin]Moya Admin[/link] library.
""",
"http://moyaproject.com/auth": """
Tags defined in [link auth]Moya Auth[/link] library.
""",
"http://moyaproject.com/blog": """
Tags defined in the [link blog]Moya Blog[/link] library.
""",
"http://moyaproject.com/comments": """
Tags defined in the [link comments]Moya Comments[/link] library.
""",
"http://moyaproject.com/db": """
Tags used to work with [link db]databases[/link].
""",
"http://moyaproject.com": """
The default namespace used for most of Moya's tags.
""",
"http://moyaproject.com/email": """
Tags related to [link email]email[/link].
""",
"http://moyaproject.com/feedback": """
    Tags defined in [link feedback]Moya Feedback[/link].
""",
"http://moyaproject.com/forms": """
Tags defined in [link forms]Moya Forms[/link].
""",
"http://moyaproject.com/fs": """
Tags for working with [link project#filesystems]filesystems[/link].
""",
"http://moyaproject.com/image": """
Tags for working with [link images]images[/link].
""",
"http://moyaproject.com/jsonrpc": """
Tags for creating [link jsonrpc]JSON RPC[/link] interfaces.
""",
"http://moyaproject.com/links": """
Tags for defining [link links]links[/link].
""",
"http://moyaproject.com/preflight": """
Tags for creating [link preflight]preflight checks[/link].
""",
"http://moyaproject.com/recaptcha": """
Tags defined in [link recaptcha]Moya Google Recaptcha[/link].
""",
"http://moyaproject.com/soup": """
Tags for processing HTML tags.
""",
"http://moyaproject.com/tables": """
Tags used to create [link tables]table[/link] content.
""",
"http://moyaproject.com/test": """
Tags to build unit tests.
""",
"http://moyaproject.com/thumbnail": """
Tags defined in the [link thumbnail]Moya Thumbnail[/link] library.
""",
"http://moyaproject.com/widgets": """
Widgets defined in [link widgets]Moya Widgets[/link].
""",
"http://moyaproject.com/wysihtml5": """
Tags to create a rich text editor with [link wysihtml5]WYSIHTML5[/link].
""",
}
|
mit
| 2,579,244,657,551,941,000
| 33.608247
| 80
| 0.626154
| false
| 3.2186
| false
| false
| false
|
rspeer/csc-pysparse
|
examples/pysparse_test.py
|
1
|
8278
|
import math, os, sys, time
import numpy as Numeric
from pysparse import spmatrix
from pysparse import itsolvers
from pysparse import precon
ll = spmatrix.ll_mat(5,5)
print ll
print ll[1,1]
print ll
ll[2,1] = 1.0
ll[1,3] = 2.0
print ll
print ll.to_csr()
print ll[1,3]
print ll[1,-1]
print ll.nnz
ll.export_mtx('test.mtx')
L = spmatrix.ll_mat(10, 10)
for i in range(0, 10):
L[i,i] = float(i+1)
A = L.to_csr()
x = Numeric.ones([10], 'd')
y = Numeric.zeros([10], 'd')
print A, x, y
A.matvec(x, y)
print y
ll = spmatrix.ll_mat(100, 100)
for i in range(0, 100, 5):
for j in range(0, 100, 4):
ll[i,j] = 1.0/float(i+j+1)
A = ll.to_csr()
x = Numeric.arange(100).astype(Numeric.float)
y = Numeric.zeros(100, 'd')
z = Numeric.zeros(100, 'd')
A.matvec(x, y)
print y
print 'norm(y) = ', math.sqrt(Numeric.dot(y, y))
##A.matvec_transp(x, z)
##print z
##print 'norm(z) = ', math.sqrt(Numeric.add.reduce(z))
L = spmatrix.ll_mat(10,10)
for i in range(10):
L[i,i] = float(i+1)
A = L.to_csr()
print A
x = Numeric.zeros(10, 'd')
b = Numeric.ones(10, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-8, 100)
print info, iter, relres
print x
if (info != 0):
print >> sys.stderr, 'cg not converged'
L2 = L.copy()
x = Numeric.zeros(10, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-8, 100)
print info, iter, relres
# -----------------------------------------------------------
print 'remove test'
n = 100
L = spmatrix.ll_mat(n, n)
for run in range(5):
print 'adding elements...'
for i in range(0,n,2):
for j in range (n):
L[i,j] = i+j+1
# print L
print L.nnz
print 'removing elements...'
for j in range(0,n,2):
for i in range (n):
L[i,j] = 0.0
# print L
print L.nnz
# -----------------------------------------------------------
print 'submatrix test'
n = 100
L = spmatrix.ll_mat(n, n)
for i in range (0, n, 2):
for j in range (1, n, 2):
L[i,j] = float(n*i + j);
print L[10:18,75:80]
print L[10:15,35:10]
print L[19:15,35:10]
# -----------------------------------------------------------
print 'submatrix assign test'
n = 10
L = spmatrix.ll_mat(n, n);
for i in range (0, n, 1):
for j in range (0, n, 1):
L[i,j] = 1.0;
print L
Z = spmatrix.ll_mat(n-2, n-2)
L[1:n-1,1:n-1] = Z
print L
print L.nnz
#------------------------------------------------------------
if 0:
f = open(os.environ['HOME']+'/matrices/poi2d_300.mtx')
t1 = time.clock()
L = ll_mat_from_mtx(f)
t_read = time.clock() - t1
f.close()
print 'time for reading matrix data from file: %.2f sec' % t_read
if 1:
t1 = time.clock()
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/poi2d_300.mtx')
t_read = time.clock() - t1
print 'time for reading matrix data from file: %.2f sec' % t_read
#------------------------------------------------------------
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/node4x3x1_A.mtx')
print L.shape, L.nnz
A = L.to_sss()
class diag_prec:
def __init__(self, A):
self.shape = A.shape
n = self.shape[0]
self.dinv = Numeric.zeros(n, 'd')
for i in xrange(n):
self.dinv[i] = 1.0 / A[i,i]
def precon(self, x, y):
Numeric.multiply(x, self.dinv, y)
def resid(A, b, x):
r = x.copy()
A.matvec(x, r)
r = b - r
return math.sqrt(Numeric.dot(r, r))
K_diag = diag_prec(A)
K_jac = precon.jacobi(A, 1.0, 1)
K_ssor = precon.ssor(A, 1.0, 1)
# K_ilu = precon.ilutp(L)
n = L.shape[0];
b = Numeric.arange(n).astype(Numeric.float)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000)
print 'pcg, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000, K_diag)
print 'pcg, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000, K_jac)
print 'pcg, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000, K_ssor)
print 'pcg, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000)
print 'minres, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000, K_diag)
print 'minres, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000, K_jac)
print 'minres, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000, K_ssor)
print 'minres, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000)
print 'qmrs, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000, K_diag)
print 'qmrs, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000, K_jac)
print 'qmrs, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000, K_ssor)
print 'qmrs, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000)
print 'cgs, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000, K_diag)
print 'cgs, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000, K_jac)
print 'cgs, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000, K_ssor)
print 'cgs, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000)
print 'bicgstab, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000, K_diag)
print 'bicgstab, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000, K_jac)
print 'bicgstab, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000, K_ssor)
print 'bicgstab, K_ssor: ', info, iter, relres, resid(A, b, x)
#------------------------------------------------------------
import superlu
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/cop18_el3_A.mtx')
##f = open('cop18_el5_A.mtx')
##L = ll_mat_from_mtx(f)
##f.close()
n11 = 4688
L = L[0:n11, 0:n11] # extract (1,1)-block
# make matrix regular
for i in xrange(n11):
L[i,i] = 1
print L.shape, L.nnz
n = L.shape[0]
B = L.to_csr()
su = superlu.factorize(B, diag_pivot_thresh=0.0)
print su.nnz
b = Numeric.arange(n).astype(Numeric.float) / n
x = Numeric.zeros(n, 'd')
su.solve(b, x)
print 'norm(b) = %g' % math.sqrt(Numeric.dot(b, b))
print 'norm(x) = %g' % math.sqrt(Numeric.dot(x, x))
r = Numeric.zeros(n, 'd')
B.matvec(x, r)
r = b - r
print 'norm(b - A*x) = %g' % math.sqrt(Numeric.dot(r, r))
if 1:
for panel_size in [5, 10, 15]:
for relax in [1, 3, 5]:
for permc_spec in [0, 1, 2]:
for diag_pivot_thresh in [0.0, 0.5, 1.0]:
t1 = time.clock()
su = superlu.factorize(B,
panel_size=panel_size,
relax=relax,
permc_spec=permc_spec,
diag_pivot_thresh=diag_pivot_thresh)
t_fact = time.clock() - t1
t1 = time.clock()
su.solve(b, x)
t_solve = time.clock() - t1
print 'panel_size=%2d, relax=%d, permc_spec=%d, diag_pivot_thresh=%.1f nnz=%d, t_fact=%.2f, t_solve=%.2f' % \
(panel_size, relax, permc_spec, diag_pivot_thresh, su.nnz, t_fact, t_solve)
|
bsd-2-clause
| -5,013,317,756,455,400,000
| 28.35461
| 131
| 0.550374
| false
| 2.435422
| false
| false
| false
|
CivicKnowledge/ambry-ui
|
ambry_ui/session.py
|
1
|
1804
|
# Stolen from: http://flask.pocoo.org/snippets/51/
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
from itsdangerous import URLSafeTimedSerializer, BadSignature
class ItsdangerousSession(CallbackDict, SessionMixin):
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class ItsdangerousSessionInterface(SessionInterface):
salt = 'cookie-session'
session_class = ItsdangerousSession
def get_serializer(self, app):
if not app.secret_key:
return None
return URLSafeTimedSerializer(app.secret_key,
salt=self.salt)
def open_session(self, app, request):
s = self.get_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = app.permanent_session_lifetime.total_seconds()
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,domain=domain)
return
expires = self.get_expiration_time(app, session)
val = self.get_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=True,
domain=domain)
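# --- Hedged usage sketch (editor's example; the secret below is a placeholder) ---
# Wiring the interface into a Flask application replaces the default
# cookie-session implementation with the itsdangerous-backed one above.
if __name__ == '__main__':
    from flask import Flask, session

    app = Flask(__name__)
    app.secret_key = 'change-me'
    app.session_interface = ItsdangerousSessionInterface()

    @app.route('/')
    def index():
        session['visits'] = session.get('visits', 0) + 1
        return 'visits: %d' % session['visits']

    # app.run()  # uncomment to serve locally and inspect the signed cookie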
|
bsd-3-clause
| -1,761,448,075,511,564,300
| 33.056604
| 77
| 0.622506
| false
| 4.274882
| false
| false
| false
|
google-research/ssl_detection
|
third_party/FasterRCNN/FasterRCNN/modeling/model_box.py
|
1
|
7199
|
# -*- coding: utf-8 -*-
# File: model_box.py
import numpy as np
import tensorflow as tf
from collections import namedtuple
from tensorpack.tfutils.scope_utils import under_name_scope
from config import config
@under_name_scope()
def clip_boxes(boxes, window, name=None):
"""
Args:
boxes: nx4, xyxy
window: [h, w]
"""
boxes = tf.maximum(boxes, 0.0)
m = tf.tile(tf.reverse(window, [0]), [2]) # (4,)
boxes = tf.minimum(boxes, tf.cast(m, tf.float32), name=name)
return boxes
@under_name_scope()
def decode_bbox_target(box_predictions, anchors):
"""
Args:
box_predictions: (..., 4), logits
anchors: (..., 4), floatbox. Must have the same shape
Returns:
box_decoded: (..., 4), float32. With the same shape.
"""
orig_shape = tf.shape(anchors)
box_pred_txtytwth = tf.reshape(box_predictions, (-1, 2, 2))
box_pred_txty, box_pred_twth = tf.split(box_pred_txtytwth, 2, axis=1)
# each is (...)x1x2
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
clip = np.log(config.PREPROC.MAX_SIZE / 16.)
wbhb = tf.exp(tf.minimum(box_pred_twth, clip)) * waha
xbyb = box_pred_txty * waha + xaya
x1y1 = xbyb - wbhb * 0.5
x2y2 = xbyb + wbhb * 0.5 # (...)x1x2
out = tf.concat([x1y1, x2y2], axis=-2)
return tf.reshape(out, orig_shape)
@under_name_scope()
def encode_bbox_target(boxes, anchors):
"""
Args:
boxes: (..., 4), float32
anchors: (..., 4), float32
Returns:
box_encoded: (..., 4), float32 with the same shape.
"""
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
boxes_x1y1x2y2 = tf.reshape(boxes, (-1, 2, 2))
boxes_x1y1, boxes_x2y2 = tf.split(boxes_x1y1x2y2, 2, axis=1)
wbhb = boxes_x2y2 - boxes_x1y1
xbyb = (boxes_x2y2 + boxes_x1y1) * 0.5
# Note that here not all boxes are valid. Some may be zero
txty = (xbyb - xaya) / waha
twth = tf.log(wbhb / waha) # may contain -inf for invalid boxes
encoded = tf.concat([txty, twth], axis=1) # (-1x2x2)
return tf.reshape(encoded, tf.shape(boxes))
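# --- Hedged numeric check (editor's comment; worked example, not part of the model) ---
# For a single anchor/box pair the encoding above reduces to
#     t_xy = (center_box - center_anchor) / wh_anchor
#     t_wh = log(wh_box / wh_anchor)
# and decode_bbox_target applies the inverse. For example, with
#     anchor = [0, 0, 10, 10] (x1, y1, x2, y2) and box = [2, 2, 8, 12]:
#     wh_anchor = (10, 10), center_anchor = (5, 5)
#     wh_box    = (6, 10),  center_box    = (5, 7)
#     t         = (0.0, 0.2, log(0.6), 0.0)
# Decoding t against the same anchor recovers the original box.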
@under_name_scope()
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):
"""
Aligned version of tf.image.crop_and_resize, following our definition of
floating point boxes.
Args:
image: NCHW
boxes: nx4, x1y1x2y2
box_ind: (n,)
crop_size (int):
Returns:
n,C,size,size
"""
assert isinstance(crop_size, int), crop_size
boxes = tf.stop_gradient(boxes)
# TF's crop_and_resize produces zeros on border
if pad_border:
# this can be quite slow
image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')
boxes = boxes + 1
@under_name_scope()
def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
"""
The way tf.image.crop_and_resize works (with normalized box):
Initial point (the value of output[0]): x0_box * (W_img - 1)
Spacing: w_box * (W_img - 1) / (W_crop - 1)
Use the above grid to bilinear sample.
However, what we want is (with fpcoor box):
Spacing: w_box / W_crop
Initial point: x0_box + spacing/2 - 0.5
(-0.5 because bilinear sample (in my definition) assumes floating point
coordinate
(0.0, 0.0) is the same as pixel value (0, 0))
This function transform fpcoor boxes to a format to be used by
tf.image.crop_and_resize
Returns:
y1x1y2x2
"""
x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)
spacing_w = (x1 - x0) / tf.cast(crop_shape[1], tf.float32)
spacing_h = (y1 - y0) / tf.cast(crop_shape[0], tf.float32)
imshape = [
tf.cast(image_shape[0] - 1, tf.float32),
tf.cast(image_shape[1] - 1, tf.float32)
]
nx0 = (x0 + spacing_w / 2 - 0.5) / imshape[1]
ny0 = (y0 + spacing_h / 2 - 0.5) / imshape[0]
nw = spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]
nh = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]
return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)
image_shape = tf.shape(image)[2:]
boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
image = tf.transpose(image, [0, 2, 3, 1]) # nhwc
ret = tf.image.crop_and_resize(
image,
boxes,
tf.cast(box_ind, tf.int32),
crop_size=[crop_size, crop_size])
ret = tf.transpose(ret, [0, 3, 1, 2]) # ncss
return ret
@under_name_scope()
def roi_align(featuremap, boxes, resolution):
"""
Args:
featuremap: 1xCxHxW
boxes: Nx4 floatbox
resolution: output spatial resolution
Returns:
NxCx res x res
"""
# sample 4 locations per roi bin
ret = crop_and_resize(featuremap, boxes,
tf.zeros([tf.shape(boxes)[0]], dtype=tf.int32),
resolution * 2)
try:
avgpool = tf.nn.avg_pool2d
except AttributeError:
avgpool = tf.nn.avg_pool
ret = avgpool(
ret, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format='NCHW')
return ret
class RPNAnchors(namedtuple('_RPNAnchors', ['boxes', 'gt_labels', 'gt_boxes'])):
"""
boxes (FS x FS x NA x 4): The anchor boxes.
gt_labels (FS x FS x NA):
gt_boxes (FS x FS x NA x 4): Groundtruth boxes corresponding to each anchor.
"""
def encoded_gt_boxes(self):
return encode_bbox_target(self.gt_boxes, self.boxes)
def decode_logits(self, logits):
return decode_bbox_target(logits, self.boxes)
@under_name_scope()
def narrow_to(self, featuremap):
"""
Slice anchors to the spatial size of this featuremap.
"""
shape2d = tf.shape(featuremap)[2:] # h,w
slice3d = tf.concat([shape2d, [-1]], axis=0)
slice4d = tf.concat([shape2d, [-1, -1]], axis=0)
boxes = tf.slice(self.boxes, [0, 0, 0, 0], slice4d)
gt_labels = tf.slice(self.gt_labels, [0, 0, 0], slice3d)
gt_boxes = tf.slice(self.gt_boxes, [0, 0, 0, 0], slice4d)
return RPNAnchors(boxes, gt_labels, gt_boxes)
if __name__ == '__main__':
"""
Demonstrate what's wrong with tf.image.crop_and_resize.
Also reported at https://github.com/tensorflow/tensorflow/issues/26278
"""
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
# want to crop 2x2 out of a 5x5 image, and resize to 4x4
image = np.arange(25).astype('float32').reshape(5, 5)
boxes = np.asarray([[1, 1, 3, 3]], dtype='float32')
target = 4
print(crop_and_resize(image[None, None, :, :], boxes, [0], target)[0][0])
"""
Expected values:
4.5 5 5.5 6
7 7.5 8 8.5
9.5 10 10.5 11
12 12.5 13 13.5
You cannot easily get the above results with tf.image.crop_and_resize.
Try out yourself here:
"""
print(
tf.image.crop_and_resize(image[None, :, :, None],
np.asarray([[1, 1, 2, 2]]) / 4.0, [0],
[target, target])[0][:, :, 0])
|
apache-2.0
| 4,270,145,771,473,847,000
| 29.634043
| 80
| 0.600222
| false
| 2.81982
| false
| false
| false
|
christiankaiser/spatial-tools
|
src/v.to.raster/v.to.raster.py
|
1
|
4911
|
#!/usr/bin/env python
"""
Converts a vector layer to a raster
"""
__version__ = '1.0.0'
__date__ = '2011-03-24'
__author__ = 'Christian Kaiser <chri.kais@gmail.com>'
import commands
import math
import osgeo.ogr as ogr
import osgeo.gdal as gdal
import osgeo.osr as osr
from optparse import OptionParser
import sys
USAGE = """v.to.raster
--input INPUT_LAYER --output OUTPUT_RASTER --attr ATTR
[--res RESOLUTION] [--size RASTERX,RASTERY]
[--envelope XMIN,YMIN,XMAX,YMAX]
"""
def vector_to_raster(ogrfile, attribute, rasterfile, rastersize=None,
res=None, minx=None, maxx=None, miny=None, maxy=None):
"""
Transforms an OGR compatible vector layer into a raster layer in HFA
format. The value of the provided attribute is used as the pixel value in
the raster. This function shells out to gdal_rasterize, so that command
must be available on the system.
"""
#print("vector_to_raster: opening %s" % ogrfile)
# Open the vector file
inds = ogr.Open(ogrfile)
if inds is None:
raise Exception("Unable to open %s\n" % ogrfile)
# Check if there is at least one layer in the OGR datasource.
nlyrs = inds.GetLayerCount()
if nlyrs < 1:
raise Exception("Data source %s does not have any layer.\n" % ogrfile)
# Get the layer from the vector file
#lyrname = os.path.splitext(os.path.basename(ogrfile))[0]
#try:
# lyr = inds.GetLayerByName(lyrname)
#except:
lyr = inds.GetLayer(0)
lyrname = lyr.GetLayerDefn().GetName()
if lyr == None:
raise Exception("Unable to open OGR layer in %s\n" % ogrfile)
# We have to create a new raster dataset first.
# Determine the extent of the vector file if the extent is not provided.
if minx == None or maxx == None or miny == None or maxy == None:
extent = lyr.GetExtent()
if minx == None: minx = extent[0]
if maxx == None: maxx = extent[1]
if miny == None: miny = extent[2]
if maxy == None: maxy = extent[3]
if minx > maxx:
minx = extent[0]
maxx = extent[1]
if miny > maxy:
miny = extent[2]
maxy = extent[3]
# Compute the resolution if not provided
if res is None:
xres = (maxx - minx) / rastersize[0]
yres = (maxy - miny) / rastersize[1]
res = xres
if yres > xres: res = yres
# Adjust the raster size to fit the extent proportions
sizex = int(math.ceil((maxx - minx) / res))
sizey = int(math.ceil((maxy - miny) / res))
# Create a new raster layer
rasterDriver = gdal.GetDriverByName('HFA')
outds = rasterDriver.Create(rasterfile, sizex, sizey, 1, gdal.GDT_Float64)
rasterTransform = [minx, res, 0.0, maxy, 0.0, -res]
outds.SetGeoTransform(rasterTransform)
# Get projection of OGR file and assign to raster
srs = osr.SpatialReference()
srs.ImportFromWkt(lyr.GetSpatialRef().__str__())
outds.SetProjection(srs.ExportToWkt())
# Close the vector and raster files.
inds = None
outds = None
# Execute gdal_rasterize
commandString = "gdal_rasterize -a %s -l %s %s %s" % (attribute, lyrname, ogrfile, rasterfile)
commandOutput = commands.getoutput(commandString)
if __name__ == "__main__":
parser = OptionParser(usage=USAGE)
parser.add_option(
'-i', '--input', dest="input",
help="OGR compatible input vector layer",
metavar="INPUT_LAYER"
)
parser.add_option(
'-o', '--output', dest="output",
help="Path to output raster file",
metavar="OUTPUT_RASTER"
)
parser.add_option(
'-a', '--attr', dest="attr",
help="Attribute name containing the pixel value",
metavar="ATTR"
)
parser.add_option(
'-r', '--res', dest="res",
help="Raster pixel size (image resolution)"
)
parser.add_option(
'-s', '--size', dest="size",
help="Raster size"
)
parser.add_option(
'-e', '--env', dest="env",
help="Bounding box"
)
(options, args) = parser.parse_args()
if options.size != None:
size = map(int, options.size.split(','))
else:
size = None
if options.res != None:
res = float(options.res)
else:
res = None
if options.env != None:
xmin, ymin, xmax, ymax = map(float, options.env.split(','))
else:
xmin = ymin = xmax = ymax = None
if options.input == None or options.output == None or options.attr == None:
print(USAGE)
sys.exit(0)
print("v.to.raster starting...")
vector_to_raster(
ogrfile = options.input,
attribute = options.attr,
rasterfile = options.output,
rastersize = size,
res = res,
minx = xmin, maxx = xmax, miny = ymin, maxy = ymax
)
print("v.to.raster done")
|
gpl-3.0
| -648,093,374,068,651,400
| 29.314815
| 98
| 0.59438
| false
| 3.470671
| false
| false
| false
|
nfqsolutions/pylm
|
setup.py
|
1
|
2731
|
#!/usr/bin/env python
from setuptools import setup
__version__ = None
with open('pylm/__init__.py') as f:
exec(f.read())
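# Executing pylm/__init__.py here pulls in its __version__ assignment, so the
# package version is declared in exactly one place (a common setup.py idiom).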
long_description = """
Pylm
====
Pylm is the Python implementation of PALM, a framework to build
clusters of high performance components. It is presented in two
different levels of abstraction. In the high level API you will find
servers and clients that are functional *out of the box*. Use the high
level API if you are interested in simple communication patterns like
client-server, master-slave or a streaming pipeline. In the low level
API there are a variety of small components that, once combined,
can be used to implement almost any kind of
component. It's what the high level API uses under the hood. Choose
the low level API if you are interested in creating your custom
component and your custom communication pattern.
**Pylm requires Python 3.4 or newer, and it is
more thoroughly tested with Python 3.5.**
Installing **pylm** is as easy as:
.. code-block:: bash
$> pip install pylm
* `PYPI package page <https://pypi.python.org/pypi/pylm/>`_
* `Documentation <http://pylm.readthedocs.io/en/latest/>`_
* `Source code <https://github.com/nfqsolutions/pylm>`_
Pylm is released under a dual licensing scheme. The source is released
as-is under the AGPL version 3 license; a copy of the license is
included with the source. If this license does not suit you,
you can purchase a commercial license from `NFQ Solutions
<http://nfqsolutions.com>`_
This project has been funded by the Spanish Ministry of Economy and
Competitiveness under grant IDI-20150936, co-financed with FEDER
funds.
"""
setup(name='pylm',
version=__version__,
description='A framework to build clusters of high performance components',
long_description=long_description,
author='Guillem Borrell',
author_email='guillemborrell@gmail.com',
packages=['pylm',
'pylm.parts',
'pylm.persistence',
'pylm.remote'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Distributed Computing',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: GNU Affero General Public License v3'
],
setup_requires=['pytest-runner'],
install_requires=['protobuf>=3.0.0', 'requests', 'pyzmq']
)
|
agpl-3.0
| -3,243,969,502,929,084,000
| 34.467532
| 81
| 0.682534
| false
| 3.998536
| false
| true
| false
|
hubo1016/vlcp
|
vlcp/service/kvdb/objectdb.py
|
1
|
63427
|
'''
Created on 2016/3/24
:author: hubo
'''
from vlcp.config.config import defaultconfig
import vlcp.service.kvdb.storage as storage
import vlcp.service.kvdb.redisnotifier as redisnotifier
from vlcp.server.module import depend, Module, call_api, api
import vlcp.utils.dataobject as dataobj
from vlcp.event.runnable import RoutineContainer
from vlcp.event.event import Event, withIndices, M_
from time import time
from copy import deepcopy
from vlcp.event.core import QuitException, syscall_removequeue
import itertools
from vlcp.utils.dataobject import AlreadyExistsException, UniqueKeyReference,\
MultiKeyReference, DataObjectSet, UniqueKeySet, WeakReferenceObject,\
MultiKeySet, ReferenceObject, request_context
from contextlib import closing
import functools
import copy
from vlcp.utils.exceptions import AsyncTransactionLockException, StaleResultException,\
TransactionRetryExceededException, TransactionTimeoutException, WalkKeyNotRetrieved
try:
from itertools import izip
except ImportError:
izip = zip
@withIndices()
class RetrieveRequestSend(Event):
pass
@withIndices('id')
class RetrieveReply(Event):
pass
def _str(b):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode('utf-8')
else:
return str(b)
def _str2(b):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode('utf-8')
elif hasattr(b, 'getkey'):
return b.getkey()
else:
return str(b)
class _NeedMoreKeysException(Exception):
pass
@defaultconfig
@depend(storage.KVStorage, redisnotifier.UpdateNotifier)
class ObjectDB(Module):
"""
Abstract transaction layer for KVDB
"""
service = True
# Priority for object update event
_default_objectupdatepriority = 450
# Enable debugging mode for updaters: every updater will be called an extra time
# to make sure it does not crash with multiple calls
_default_debuggingupdater = False
def __init__(self, server):
Module.__init__(self, server)
self._managed_objs = {}
self._watches = {}
self._requestids = {}
self._watchedkeys = set()
self._requests = []
self._transactno = 0
self._stale = False
self._updatekeys = set()
self._update_version = {}
self._cache = None
self._pending_gc = 0
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._update
self.routines.append(self.apiroutine)
self.create_api(api(self.mget, self.apiroutine),
api(self.get, self.apiroutine),
api(self.mgetonce, self.apiroutine),
api(self.getonce, self.apiroutine),
api(self.mwatch, self.apiroutine),
api(self.watch, self.apiroutine),
api(self.munwatch, self.apiroutine),
api(self.unwatch, self.apiroutine),
api(self.unwatchall, self.apiroutine),
api(self.transact, self.apiroutine),
api(self.watchlist),
api(self.walk, self.apiroutine),
api(self.gettimestamp, self.apiroutine),
api(self.asynctransact, self.apiroutine),
api(self.writewalk, self.apiroutine),
api(self.asyncwritewalk, self.apiroutine)
)
def _set_watch(self, key, requestid):
self._watches.setdefault(key, set()).add(requestid)
self._requestids.setdefault(requestid, set()).add(key)
def _remove_watch(self, key, requestid):
s = self._watches.get(key)
if s:
s.discard(requestid)
if not s:
del self._watches[key]
s = self._requestids.get(requestid)
if s:
s.discard(key)
if not s:
del self._requestids[requestid]
def _remove_all_watches(self, requestid):
s = self._requestids.get(requestid)
if s is not None:
for k in s:
s2 = self._watches.get(k)
if s2:
s2.discard(requestid)
if not s2:
del self._watches[k]
del self._requestids[requestid]
async def load(self, container):
self.scheduler.queue.addSubQueue(\
self.objectupdatepriority, dataobj.DataObjectUpdateEvent.createMatcher(), 'dataobjectupdate')
self._notifier = await call_api(container, 'updatenotifier', 'createnotifier')
await Module.load(self, container)
self.routines.append(self._notifier)
async def unload(self, container, force=False):
await container.syscall(syscall_removequeue(self.scheduler.queue, 'dataobjectupdate'))
await Module.unload(self, container, force=force)
async def _update(self):
timestamp = '%012x' % (int(time() * 1000),) + '-'
notification_matcher = self._notifier.notification_matcher(False)
def copywithkey(obj, key):
newobj = deepcopy(obj)
if hasattr(newobj, 'setkey'):
newobj.setkey(key)
return newobj
def getversion(obj):
if obj is None:
return (0, -1)
else:
return (getattr(obj, 'kvdb_createtime', 0), getattr(obj, 'kvdb_updateversion', 0))
def isnewer(obj, version):
if obj is None:
return version[1] != -1
else:
return getversion(obj) > version
request_matcher = RetrieveRequestSend.createMatcher()
def onupdate(event, matcher):
update_keys = self._watchedkeys.intersection([_str(k) for k in event.keys])
self._updatekeys.update(update_keys)
if event.extrainfo:
for k,v in zip(event.keys, event.extrainfo):
k = _str(k)
if k in update_keys:
v = tuple(v)
oldv = self._update_version.get(k, (0, -1))
if oldv < v:
self._update_version[k] = v
else:
for k in event.keys:
try:
del self._update_version[_str(k)]
except KeyError:
pass
async def updateinner():
processing_requests = []
# New managed keys
retrieve_list = set()
orig_retrieve_list = set()
retrieveonce_list = set()
orig_retrieveonce_list = set()
processing_request_ids = set()
# Retrieved values are stored in update_result before merging into current storage
update_result = {}
# key => [(walker_func, (original_keys, rid)), ...]
walkers = {}
# Use the loop count as a revision identifier; the valid revisions of a value
# in update_result then form a range, from the last loop in which the value
# changed (or -1 if it never changed) to the last loop in which it was
# retrieved.
#
# Each walker may only walk on keys that share at least one revision, to
# ensure the values it sees are consistent. If no revision can be shared, all
# the keys must be retrieved again to get a consistent view.
revision_min = {}
revision_max = {}
self._loopCount = 0
# A request-id -> retrieve set dictionary to store the saved keys
savelist = {}
# (start_key, walker_func, rid) => set(used_keys)
walker_used_keys = {}
# used_key => [(start_key, walker_func, (original_keys, rid)), ...]
used_key_ref = {}
def _update_walker_ref(start_key, walker, original_keys, rid, used_keys):
old_used_keys = walker_used_keys.get((start_key, walker, rid), ())
for k in old_used_keys:
if k not in used_keys:
old_list = used_key_ref[k]
for i, v in enumerate(old_list):
if v[0] == start_key and v[1] == walker and v[2][1] == rid:
break
else:
continue
old_list[i:] = old_list[i+1:]
for k in used_keys:
if k not in old_used_keys:
used_key_ref.setdefault(k, []).append((start_key, walker, (original_keys, rid)))
walker_used_keys[(start_key, walker, rid)] = set(used_keys)
# (start_key, walker, rid) => cached_result
finished_walkers = {}
def _dirty_walkers(new_values):
for k in new_values:
if k in used_key_ref:
for start_key, walker, (_, rid) in used_key_ref[k]:
finished_walkers.pop((start_key, walker, rid), None)
async def updateloop():
while (retrieve_list or self._updatekeys or self._requests):
# default walker, default walker cached, customized walker, customized walker cached
_performance_counters = [0, 0, 0, 0]
# Updated keys
update_list = set()
if self._loopCount >= 10 and not retrieve_list:
if not self._updatekeys:
break
elif self._loopCount >= 100:
# Too many updates, we must stop to respond
self._logger.warning("There are still database updates after 100 loops of mget, respond with potential inconsistent values")
break
if self._updatekeys:
update_list.update(self._updatekeys)
self._updatekeys.clear()
if self._requests:
# Processing requests
for r in self._requests:
if r[2] == 'unwatch':
try:
for k in r[0]:
self._remove_watch(k, r[3])
# Do not need to wait
except Exception as exc:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], result = None))
elif r[2] == 'unwatchall':
if r[3] in processing_request_ids:
# unwatch a processing request
# pend this request until all requests are processed
processing_requests.append(r)
else:
try:
self._remove_all_watches(r[3])
except Exception as exc:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], result = None))
elif r[2] == 'watch':
retrieve_list.update(r[0])
orig_retrieve_list.update(r[0])
for k in r[0]:
self._set_watch(k, r[3])
processing_requests.append(r)
processing_request_ids.add(r[3])
elif r[2] == 'get':
retrieve_list.update(r[0])
orig_retrieve_list.update(r[0])
processing_requests.append(r)
processing_request_ids.add(r[3])
elif r[2] == 'walk':
retrieve_list.update(r[0])
processing_requests.append(r)
for k,v in r[3].items():
walkers.setdefault(k, []).append((v, (r[0], r[1])))
processing_request_ids.add(r[4])
else:
retrieveonce_list.update(r[0])
orig_retrieveonce_list.update(r[0])
processing_requests.append(r)
del self._requests[:]
if retrieve_list:
watch_keys = tuple(k for k in retrieve_list if k not in self._watchedkeys)
# Add watch_keys to notification
if watch_keys:
for k in watch_keys:
if k in update_result:
self._update_version[k] = getversion(update_result[k])
await self._notifier.add_listen(*watch_keys)
self._watchedkeys.update(watch_keys)
get_list_set = update_list.union(itertools.chain((k for k in retrieve_list
if k not in update_result and k not in self._managed_objs),
(k for k in retrieveonce_list
if k not in update_result and k not in self._managed_objs)))
get_list = list(get_list_set)
new_values = set()
if get_list:
try:
result, self._cache = await call_api(
self.apiroutine,
'kvstorage',
'mgetwithcache',
{'keys': get_list, 'cache': self._cache}
)
except QuitException:
raise
except Exception:
# Serve with cache
if not self._stale:
self._logger.warning('KVStorage retrieve failed, serve with cache', exc_info = True)
self._stale = True
# Discard all retrieved results
update_result.clear()
# Retry update later
self._updatekeys.update(update_list)
#break
revision_min.clear()
revision_max.clear()
else:
self._stale = False
for k,v in izip(get_list, result):
# Update revision information
revision_max[k] = self._loopCount
if k not in update_result:
if k not in self._managed_objs:
# A newly retrieved key
revision_min[k] = self._loopCount
old_value = None
else:
old_value = self._managed_objs[k]
else:
old_value = update_result[k]
# Check if the value is changed
if old_value is not v and getversion(old_value) != getversion(v):
revision_min[k] = self._loopCount
new_values.add(k)
else:
if k not in revision_min:
revision_min[k] = -1
if old_value is not v:
if v is not None and hasattr(v, 'setkey'):
v.setkey(k)
if k in self._watchedkeys and k not in self._update_version:
self._update_version[k] = getversion(v)
update_result.update(zip(get_list, result))
# Disable cache for walkers with updated keys
_dirty_walkers(new_values)
# All keys which should be retrieved in next loop
new_retrieve_list = set()
# Keys which should be retrieved in next loop for a single walk
new_retrieve_keys = set()
# Keys that are used in current walk will be retrieved again in next loop
used_keys = set()
# We separate the data with revisions to prevent inconsistent result
def create_walker(orig_key, strict=True):
revision_range = [revision_min.get(orig_key, -1), revision_max.get(orig_key, -1)]
def _walk_with_revision(key):
if hasattr(key, 'getkey'):
key = key.getkey()
key = _str(key)
if key not in self._watchedkeys:
# This key is not retrieved, raise a KeyError, and record this key
new_retrieve_keys.add(key)
raise WalkKeyNotRetrieved(key)
elif self._stale:
if key not in self._managed_objs:
new_retrieve_keys.add(key)
used_keys.add(key)
return self._managed_objs.get(key)
elif key not in update_result and key not in self._managed_objs:
# This key is not retrieved, raise a KeyError, and record this key
new_retrieve_keys.add(key)
raise WalkKeyNotRetrieved(key)
# Check revision
current_revision = (
max(revision_min.get(key, -1), revision_range[0]),
min(revision_max.get(key, -1), revision_range[1])
)
if current_revision[1] < current_revision[0]:
# revisions cannot match
new_retrieve_keys.add(key)
if strict:
used_keys.add(key)
raise WalkKeyNotRetrieved(key)
else:
# update revision range
revision_range[:] = current_revision
if key in update_result:
used_keys.add(key)
return update_result[key]
else:
used_keys.add(key)
return self._managed_objs[key]
return _walk_with_revision
_default_walker_dup_check = set()
def default_walker(key, obj, walk, _circle_detect = None):
if _circle_detect is None:
_circle_detect = set()
if key in _circle_detect:
return
else:
_circle_detect.add(key)
if hasattr(obj, 'kvdb_internalref'):
rl = obj.kvdb_internalref()
for k in rl:
try:
newobj = walk(k)
except KeyError:
pass
else:
if newobj is not None:
default_walker(k, newobj, walk, _circle_detect)
def _do_default_walker(k):
if k not in _default_walker_dup_check:
_default_walker_dup_check.add(k)
_performance_counters[0] += 1
if (k, None, None) not in finished_walkers:
v = update_result.get(k)
if v is not None:
new_retrieve_keys.clear()
used_keys.clear()
default_walker(k, v, create_walker(k, False))
if new_retrieve_keys:
new_retrieve_list.update(new_retrieve_keys)
self._updatekeys.update(used_keys)
self._updatekeys.add(k)
else:
_all_used_keys = used_keys.union([k])
_update_walker_ref(k, None, None, None, _all_used_keys)
finished_walkers[(k, None, None)] = None
else:
_update_walker_ref(k, None, None, None, [k])
finished_walkers[(k, None, None)] = None
else:
_performance_counters[1] += 1
for k in orig_retrieve_list:
_do_default_walker(k)
savelist.clear()
for k,ws in walkers.items():
# k: the walker key
# ws: list of [walker_func, (request_original_keys, rid)]
# Retry every walker, starts with k, with the value of v
if k in update_result:
# The value is newly retrieved
v = update_result.get(k)
else:
# Use the stored value
v = self._managed_objs.get(k)
if ws:
for w,r in list(ws):
# w: walker_func
# r: (request_original_keys, rid)
# Custom walker
_performance_counters[2] += 1
_cache_key = (k, w, r[1])
if _cache_key in finished_walkers:
_performance_counters[3] += 1
savelist.setdefault(r[1], set()).update(finished_walkers[_cache_key])
else:
_local_save_list = set()
def save(key):
if hasattr(key, 'getkey'):
key = key.getkey()
key = _str(key)
if key != k and key not in used_keys:
raise ValueError('Cannot save a key without walk')
_local_save_list.add(key)
try:
new_retrieve_keys.clear()
used_keys.clear()
w(k, v, create_walker(k), save)
except Exception as exc:
# if one walker failed, the whole request is failed, remove all walkers
self._logger.warning("A walker raises an exception which rolls back the whole walk process. "
"walker = %r, start key = %r, new_retrieve_keys = %r, used_keys = %r",
w, k, new_retrieve_keys, used_keys, exc_info=True)
for orig_k in r[0]:
if orig_k in walkers:
walkers[orig_k][:] = [(w0, r0) for w0,r0 in walkers[orig_k] if r0[1] != r[1]]
processing_requests[:] = [r0 for r0 in processing_requests if r0[1] != r[1]]
savelist.pop(r[1])
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
savelist.setdefault(r[1], set()).update(_local_save_list)
if new_retrieve_keys:
new_retrieve_list.update(new_retrieve_keys)
self._updatekeys.update(used_keys)
self._updatekeys.add(k)
else:
_all_used_keys = used_keys.union([k])
_update_walker_ref(k, w, r[0], r[1], _all_used_keys)
finished_walkers[_cache_key] = _local_save_list
for save in savelist.values():
for k in save:
_do_default_walker(k)
retrieve_list.clear()
retrieveonce_list.clear()
retrieve_list.update(new_retrieve_list)
self._logger.debug("Loop %d: %d default walker (%d cached), %d customized walker (%d cached)",
self._loopCount,
*_performance_counters)
self._loopCount += 1
if self._stale:
watch_keys = tuple(k for k in retrieve_list if k not in self._watchedkeys)
if watch_keys:
await self._notifier.add_listen(*watch_keys)
self._watchedkeys.update(watch_keys)
break
while True:
await self.apiroutine.with_callback(updateloop(), onupdate, notification_matcher)
if self._loopCount >= 100 or self._stale:
break
# If some updated result is newer than the notification version, we should wait for the notification
should_wait = False
for k,v in update_result.items():
if k in self._watchedkeys:
oldv = self._update_version.get(k)
if oldv is not None and isnewer(v, oldv):
should_wait = True
break
if should_wait:
timeout, ev, m = await self.apiroutine.wait_with_timeout(0.2, notification_matcher)
if timeout:
break
else:
onupdate(ev, m)
else:
break
# Update result
send_events = []
self._transactno += 1
transactid = '%s%016x' % (timestamp, self._transactno)
update_objs = []
for k,v in update_result.items():
if k in self._watchedkeys:
if v is None:
oldv = self._managed_objs.get(k)
if oldv is not None:
if hasattr(oldv, 'kvdb_detach'):
oldv.kvdb_detach()
update_objs.append((k, oldv, dataobj.DataObjectUpdateEvent.DELETED))
else:
update_objs.append((k, None, dataobj.DataObjectUpdateEvent.DELETED))
del self._managed_objs[k]
else:
oldv = self._managed_objs.get(k)
if oldv is not None:
if oldv != v:
if oldv and hasattr(oldv, 'kvdb_update'):
oldv.kvdb_update(v)
update_objs.append((k, oldv, dataobj.DataObjectUpdateEvent.UPDATED))
else:
if hasattr(oldv, 'kvdb_detach'):
oldv.kvdb_detach()
self._managed_objs[k] = v
update_objs.append((k, v, dataobj.DataObjectUpdateEvent.UPDATED))
else:
self._managed_objs[k] = v
update_objs.append((k, v, dataobj.DataObjectUpdateEvent.UPDATED))
for k in update_result.keys():
v = self._managed_objs.get(k)
if v is not None and hasattr(v, 'kvdb_retrievefinished'):
v.kvdb_retrievefinished(self._managed_objs)
allkeys = tuple(k for k,_,_ in update_objs)
send_events.extend((dataobj.DataObjectUpdateEvent(k, transactid, t, object = v, allkeys = allkeys) for k,v,t in update_objs))
# Process requests
unwatchall = []
for r in processing_requests:
if r[2] == 'get':
objs = [self._managed_objs.get(k) for k in r[0]]
for k,v in zip(r[0], objs):
if v is not None:
self._set_watch(k, r[3])
result = [o.create_reference() if o is not None and hasattr(o, 'create_reference') else o
for o in objs]
elif r[2] == 'watch':
result = [(v.create_reference() if hasattr(v, 'create_reference') else v)
if v is not None else dataobj.ReferenceObject(k)
for k,v in ((k,self._managed_objs.get(k)) for k in r[0])]
elif r[2] == 'walk':
saved_keys = list(savelist.get(r[1], []))
for k in saved_keys:
self._set_watch(k, r[4])
objs = [self._managed_objs.get(k) for k in saved_keys]
result = (saved_keys,
[o.create_reference() if hasattr(o, 'create_reference') else o
if o is not None else dataobj.ReferenceObject(k)
for k,o in zip(saved_keys, objs)])
elif r[2] == 'unwatchall':
# Remove watches after all results are processed
unwatchall.append(r[3])
result = None
else:
result = [copywithkey(update_result.get(k, self._managed_objs.get(k)), k) for k in r[0]]
send_events.append(RetrieveReply(r[1], result = result, stale = self._stale))
for requestid in unwatchall:
self._remove_all_watches(requestid)
async def output_result():
for e in send_events:
await self.apiroutine.wait_for_send(e)
await self.apiroutine.with_callback(output_result(), onupdate, notification_matcher)
self._pending_gc += 1
async def _gc():
# Use DFS to remove unwatched objects
mark_set = set()
def dfs(k):
if k in mark_set:
return
mark_set.add(k)
v = self._managed_objs.get(k)
if v is not None and hasattr(v, 'kvdb_internalref'):
for k2 in v.kvdb_internalref():
dfs(k2)
for k in self._watches.keys():
dfs(k)
remove_keys = self._watchedkeys.difference(mark_set)
if remove_keys:
self._watchedkeys.difference_update(remove_keys)
await self._notifier.remove_listen(*tuple(remove_keys))
for k in remove_keys:
if k in self._managed_objs:
del self._managed_objs[k]
if k in self._update_version:
del self._update_version[k]
if self._cache is not None:
self._cache.gc(self._managed_objs)
self._pending_gc = 0
while True:
if not self._updatekeys and not self._requests:
if self._pending_gc >= 10:
await self.apiroutine.with_callback(_gc(), onupdate, notification_matcher)
continue
elif self._pending_gc:
timeout, ev, m = await self.apiroutine.wait_with_timeout(1, notification_matcher, request_matcher)
if timeout:
await self.apiroutine.with_callback(_gc(), onupdate, notification_matcher)
continue
else:
ev, m = await M_(notification_matcher, request_matcher)
if m is notification_matcher:
onupdate(ev, m)
await updateinner()
async def mget(self, keys, requestid, nostale = False):
"Get multiple objects and manage them. Return references to the objects."
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'get', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
async def get(self, key, requestid, nostale = False):
"""
Get an object from specified key, and manage the object.
Return a reference to the object or None if not exists.
"""
r = await self.mget([key], requestid, nostale)
return r[0]
async def mgetonce(self, keys, nostale = False):
"Get multiple objects, return copies of them. Referenced objects are not retrieved."
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'getonce'))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
async def getonce(self, key, nostale = False):
"Get a object without manage it. Return a copy of the object, or None if not exists. Referenced objects are not retrieved."
r = await self.mgetonce([key], nostale)
return r[0]
async def watch(self, key, requestid, nostale = False):
"""
Try to find an object and return a reference. Use ``reference.isdeleted()`` to test
whether the object exists.
Use ``reference.wait(container)`` to wait for the object to be existed.
"""
r = await self.mwatch([key], requestid, nostale)
return r[0]
async def mwatch(self, keys, requestid, nostale = False):
"Try to return all the references, see ``watch()``"
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'watch', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
async def unwatch(self, key, requestid):
"Cancel management of a key"
await self.munwatch([key], requestid)
async def unwatchall(self, requestid):
"Cancel management for all keys that are managed by requestid"
notify = not self._requests
rid = object()
self._requests.append(((), rid, 'unwatchall', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
async def munwatch(self, keys, requestid):
"Cancel management of keys"
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'unwatch', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
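# --- Hedged usage sketch (editor's comment; module/key names are illustrative) ---
# From another VLCP module's routine, these APIs are normally reached through
# call_api, e.g.
#
#     result = await call_api(container, 'objectdb', 'get',
#                             {'key': some_key, 'requestid': my_request_id})
#
# The returned value is a reference that this module keeps up to date until
# unwatch()/unwatchall() is called with the same requestid.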
async def transact(self, keys, updater, withtime = False, maxtime = 60):
"""
Try to update keys in a transact, with an ``updater(keys, values)``,
which returns ``(updated_keys, updated_values)``.
The updater may be called more than once. If ``withtime = True``,
the updater should take three parameters:
``(keys, values, timestamp)`` with timestamp as the server time
"""
keys = tuple(_str2(k) for k in keys)
updated_ref = [None, None]
extra_keys = []
extra_key_set = []
auto_remove_keys = set()
orig_len = len(keys)
def updater_with_key(keys, values, timestamp):
# Automatically manage extra keys
remove_uniquekeys = []
remove_multikeys = []
update_uniquekeys = []
update_multikeys = []
keystart = orig_len + len(auto_remove_keys)
for v in values[:keystart]:
if v is not None:
if hasattr(v, 'kvdb_uniquekeys'):
remove_uniquekeys.extend((k,v.create_weakreference()) for k in v.kvdb_uniquekeys())
if hasattr(v, 'kvdb_multikeys'):
remove_multikeys.extend((k,v.create_weakreference()) for k in v.kvdb_multikeys())
if self.debuggingupdater:
# Updater may be called more than once, ensure that this updater does not crash
# on multiple calls
kc = keys[:orig_len]
vc = [v.clone_instance() if v is not None and hasattr(v, 'clone_instance') else deepcopy(v) for v in values[:orig_len]]
if withtime:
updated_keys, updated_values = updater(kc, vc, timestamp)
else:
updated_keys, updated_values = updater(kc, vc)
if withtime:
updated_keys, updated_values = updater(keys[:orig_len], values[:orig_len], timestamp)
else:
updated_keys, updated_values = updater(keys[:orig_len], values[:orig_len])
for v in updated_values:
if v is not None:
if hasattr(v, 'kvdb_uniquekeys'):
update_uniquekeys.extend((k,v.create_weakreference()) for k in v.kvdb_uniquekeys())
if hasattr(v, 'kvdb_multikeys'):
update_multikeys.extend((k,v.create_weakreference()) for k in v.kvdb_multikeys())
extrakeysdict = dict(zip(keys[keystart:keystart + len(extra_keys)], values[keystart:keystart + len(extra_keys)]))
extrakeysetdict = dict(zip(keys[keystart + len(extra_keys):keystart + len(extra_keys) + len(extra_key_set)],
values[keystart + len(extra_keys):keystart + len(extra_keys) + len(extra_key_set)]))
tempdict = {}
old_values = dict(zip(keys, values))
updated_keyset = set(updated_keys)
try:
append_remove = set()
autoremove_keys = set()
# Use DFS to find auto remove keys
def dfs(k):
if k in autoremove_keys:
return
autoremove_keys.add(k)
if k not in old_values:
append_remove.add(k)
else:
oldv = old_values[k]
if oldv is not None and hasattr(oldv, 'kvdb_autoremove'):
for k2 in oldv.kvdb_autoremove():
dfs(k2)
for k,v in zip(updated_keys, updated_values):
if v is None:
dfs(k)
if append_remove:
raise _NeedMoreKeysException()
for k,v in remove_uniquekeys:
if v.getkey() not in updated_keyset and v.getkey() not in auto_remove_keys:
# This key is not updated, keep the indices untouched
continue
if k not in extrakeysdict:
raise _NeedMoreKeysException()
elif extrakeysdict[k] is not None and extrakeysdict[k].ref.getkey() == v.getkey():
# If the unique key does not reference the correct object,
# there may be an error, but we ignore it.
# Save it in a temporary dictionary; we may restore it later.
tempdict[k] = extrakeysdict[k]
extrakeysdict[k] = None
setkey = UniqueKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = UniqueKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().discard(WeakReferenceObject(k))
for k,v in remove_multikeys:
if v.getkey() not in updated_keyset and v.getkey() not in auto_remove_keys:
# This key is not updated, keep the indices untouched
continue
if k not in extrakeysdict:
raise _NeedMoreKeysException()
else:
mk = extrakeysdict[k]
if mk is not None:
mk.set.dataset().discard(v)
if not mk.set.dataset():
tempdict[k] = extrakeysdict[k]
extrakeysdict[k] = None
setkey = MultiKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = MultiKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().discard(WeakReferenceObject(k))
for k,v in update_uniquekeys:
if k not in extrakeysdict:
raise _NeedMoreKeysException()
elif extrakeysdict[k] is not None and extrakeysdict[k].ref.getkey() != v.getkey():
raise AlreadyExistsException('Unique key conflict for %r and %r, with key %r' % \
(extrakeysdict[k].ref.getkey(), v.getkey(), k))
elif extrakeysdict[k] is None:
lv = tempdict.get(k, None)
if lv is not None and lv.ref.getkey() == v.getkey():
# Restore this value
nv = lv
else:
nv = UniqueKeyReference.create_from_key(k)
nv.ref = ReferenceObject(v.getkey())
extrakeysdict[k] = nv
setkey = UniqueKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = UniqueKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().add(nv.create_weakreference())
for k,v in update_multikeys:
if k not in extrakeysdict:
raise _NeedMoreKeysException()
else:
mk = extrakeysdict[k]
if mk is None:
mk = tempdict.get(k, None)
if mk is None:
mk = MultiKeyReference.create_from_key(k)
mk.set = DataObjectSet()
setkey = MultiKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = MultiKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().add(mk.create_weakreference())
mk.set.dataset().add(v)
extrakeysdict[k] = mk
except _NeedMoreKeysException:
# Prepare the keys
extra_keys[:] = list(set(itertools.chain((k for k,v in remove_uniquekeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(k for k,v in remove_multikeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(k for k,_ in update_uniquekeys),
(k for k,_ in update_multikeys))))
extra_key_set[:] = list(set(itertools.chain((UniqueKeyReference.get_keyset_from_key(k) for k,v in remove_uniquekeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(MultiKeyReference.get_keyset_from_key(k) for k,v in remove_multikeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(UniqueKeyReference.get_keyset_from_key(k) for k,_ in update_uniquekeys),
(MultiKeyReference.get_keyset_from_key(k) for k,_ in update_multikeys))))
auto_remove_keys.clear()
auto_remove_keys.update(autoremove_keys.difference(keys[:orig_len])
.difference(extra_keys)
.difference(extra_key_set))
raise
else:
extrakeys_list = list(extrakeysdict.items())
extrakeyset_list = list(extrakeysetdict.items())
autoremove_list = list(autoremove_keys.difference(updated_keys)
.difference(extrakeysdict.keys())
.difference(extrakeysetdict.keys()))
return (tuple(itertools.chain(updated_keys,
(k for k,_ in extrakeys_list),
(k for k,_ in extrakeyset_list),
autoremove_list)),
tuple(itertools.chain(updated_values,
(v for _,v in extrakeys_list),
(v for _,v in extrakeyset_list),
[None] * len(autoremove_list))))
def object_updater(keys, values, timestamp):
old_version = {}
for k, v in zip(keys, values):
if v is not None and hasattr(v, 'setkey'):
v.setkey(k)
if v is not None and hasattr(v, 'kvdb_createtime'):
old_version[k] = (getattr(v, 'kvdb_createtime'), getattr(v, 'kvdb_updateversion', 1))
updated_keys, updated_values = updater_with_key(keys, values, timestamp)
updated_ref[0] = tuple(updated_keys)
new_version = []
for k,v in zip(updated_keys, updated_values):
if v is None:
new_version.append((timestamp, -1))
elif k in old_version:
ov = old_version[k]
setattr(v, 'kvdb_createtime', ov[0])
setattr(v, 'kvdb_updateversion', ov[1] + 1)
new_version.append((ov[0], ov[1] + 1))
else:
setattr(v, 'kvdb_createtime', timestamp)
setattr(v, 'kvdb_updateversion', 1)
new_version.append((timestamp, 1))
updated_ref[1] = new_version
return (updated_keys, updated_values)
start_time = self.apiroutine.scheduler.current_time
retry_times = 1
while True:
try:
await call_api(self.apiroutine, 'kvstorage', 'updateallwithtime',
{'keys': keys + tuple(auto_remove_keys) + \
tuple(extra_keys) + tuple(extra_key_set),
'updater': object_updater})
except _NeedMoreKeysException:
if maxtime is not None and\
self.apiroutine.scheduler.current_time - start_time > maxtime:
raise TransactionTimeoutException
retry_times += 1
except Exception:
self._logger.debug("Transaction %r interrupted in %r retries", updater, retry_times)
raise
else:
self._logger.debug("Transaction %r done in %r retries", updater, retry_times)
break
# Short-cut the update notification
update_keys = self._watchedkeys.intersection(updated_ref[0])
self._updatekeys.update(update_keys)
for k,v in zip(updated_ref[0], updated_ref[1]):
k = _str(k)
if k in update_keys:
v = tuple(v)
oldv = self._update_version.get(k, (0, -1))
if oldv < v:
self._update_version[k] = v
if not self._requests:
# Fake notification
await self.apiroutine.wait_for_send(RetrieveRequestSend())
await self._notifier.publish(updated_ref[0], updated_ref[1])
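# Illustrative sketch (not part of the original source): a minimal updater passed to
# ``transact``. The key name and the ``counter`` attribute are hypothetical; note that
# the updater may be called more than once, as documented above.
#
#     def _increase(keys, values):
#         obj = values[0]
#         if obj is None:
#             raise ValueError('object does not exist')
#         obj.counter += 1
#         return keys, values
#
#     await objdb.transact(('hypothetical.counter',), _increase)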
async def gettimestamp(self):
"""
Get a timestamp from the database server
"""
_timestamp = None
def _updater(keys, values, timestamp):
nonlocal _timestamp
_timestamp = timestamp
return ((), ())
await call_api(self.apiroutine, 'kvstorage', 'updateallwithtime',
{'keys': (),
'updater': _updater})
return _timestamp
def watchlist(self, requestid = None):
"""
Return a dictionary whose keys are database keys and whose values are lists of request ids,
optionally filtered by request id.
"""
return dict((k,list(v)) for k,v in self._watches.items() if requestid is None or requestid in v)
async def walk(self, keys, walkerdict, requestid, nostale = False):
"""
Recursively retrieve keys with customized walker functions.
``walkerdict`` is a dictionary mapping ``key -> walker(key, obj, walk, save)``.
"""
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'walk', dict(walkerdict), requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
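# Illustrative sketch (not part of the original source): a walker used with ``walk``.
# It follows the ``key -> walker(key, obj, walk, save)`` convention documented above;
# the ``network`` attribute is hypothetical.
#
#     def _net_walker(key, obj, walk, save):
#         save(key)                            # keep the starting object
#         if obj is not None and hasattr(obj, 'network'):
#             # A robust walker should tolerate the referenced key not being
#             # retrieved yet; the framework retries with more keys in that case.
#             walk(obj.network.getkey())
#             save(obj.network.getkey())
#
#     result = await objdb.walk([start_key], {start_key: _net_walker}, my_request_id)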
async def asynctransact(self, asyncupdater, withtime = False,
maxretry = None, maxtime=60):
"""
Read-Write transaction with asynchronous operations.
First, the `asyncupdater` is called as `asyncupdater(last_info, container)`.
`last_info` is the info from the last `AsyncTransactionLockException`;
when `asyncupdater` is called for the first time, `last_info` is None.
The async updater should be an async function that returns
`(updater, keys)`. The `updater` should be a valid updater function
as used in the `transact` API, and `keys` are the keys used in
the transaction.
The async updater can return None to terminate the transaction
without exception.
After the call, a transaction is automatically started with the
return values of `asyncupdater`.
`updater` can raise `AsyncTransactionLockException` to restart
the transaction from `asyncupdater`.
:param asyncupdater: An async updater `asyncupdater(last_info, container)`
which returns `(updater, keys)`
:param withtime: Whether the returned updater needs a timestamp
:param maxretry: Limit the maximum number of retries
:param maxtime: Limit the execution time. The transaction is abandoned
if still not completed after `maxtime` seconds.
"""
start_time = self.apiroutine.scheduler.current_time
def timeleft():
if maxtime is None:
return None
else:
time_left = maxtime + start_time - \
self.apiroutine.scheduler.current_time
if time_left <= 0:
raise TransactionTimeoutException
else:
return time_left
retry_times = 0
last_info = None
while True:
timeout, r = \
await self.apiroutine.execute_with_timeout(
timeleft(),
asyncupdater(last_info, self.apiroutine)
)
if timeout:
raise TransactionTimeoutException
if r is None:
return
updater, keys = r
try:
await self.transact(keys, updater, withtime, timeleft())
except AsyncTransactionLockException as e:
retry_times += 1
if maxretry is not None and retry_times > maxretry:
raise TransactionRetryExceededException
# Check time left
timeleft()
last_info = e.info
except Exception:
self._logger.debug("Async transaction %r interrupted in %r retries", asyncupdater, retry_times + 1)
raise
else:
self._logger.debug("Async transaction %r done in %r retries", asyncupdater, retry_times + 1)
break
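# Illustrative sketch (not part of the original source): an async updater passed to
# ``asynctransact``. All names are hypothetical; the async updater may await other
# services before deciding which keys to update.
#
#     async def _async_updater(last_info, container):
#         keys = ('hypothetical.counter',)
#         def _updater(keys, values):
#             obj = values[0]
#             obj.counter += 1
#             return keys, values
#         return _updater, keys
#
#     await objdb.asynctransact(_async_updater, withtime=False)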
async def writewalk(self, keys, walker, withtime = False, maxtime = 60):
"""
A read-write transaction with walkers
:param keys: initial keys used in walk. Provide keys already known to
be necessary to optimize the transaction.
:param walker: A walker should be `walker(walk, write)`,
where `walk` is a function `walk(key)->value`
to get a value from the database, and
`write` is a function `write(key, value)`
to save a value to the database.
A value can be written to the database any number of times.
A `walk` called after `write` is guaranteed
to retrieve the previously written value.
:param withtime: if withtime=True, an extra timestamp parameter is given to
walkers, so walker should be
`walker(walk, write, timestamp)`
:param maxtime: max execution time of this transaction
"""
@functools.wraps(walker)
async def _asyncwalker(last_info, container):
return (keys, walker)
return await self.asyncwritewalk(_asyncwalker, withtime, maxtime)
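# Illustrative sketch (not part of the original source): a read-write walker passed to
# ``writewalk``. The key name is hypothetical; ``walk`` raises ``WalkKeyNotRetrieved``
# for keys that have not been retrieved yet, and the transaction is then retried with
# the missing keys included.
#
#     def _walker(walk, write):
#         try:
#             obj = walk('hypothetical.counter')
#         except WalkKeyNotRetrieved:
#             return
#         obj.counter += 1
#         write('hypothetical.counter', obj)
#
#     await objdb.writewalk(('hypothetical.counter',), _walker)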
async def asyncwritewalk(self, asyncwalker, withtime = False, maxtime = 60):
"""
A read-write transaction with walker factory
:param asyncwalker: an async function called as `asyncwalker(last_info, container)`
and returns (keys, walker), which
are the same as parameters of `writewalk`
:param keys: initial keys used in walk
:param walker: A walker should be `walker(walk, write)`,
where `walk` is a function `walk(key)->value`
to get a value from the database, and
`write` is a function `write(key, value)`
to save a value to the database.
A value can be written to the database any number of times.
A `walk` called after `write` is guaranteed
to retrieve the previously written value.
raise AsyncTransactionLockException in walkers
to restart the transaction
:param withtime: if withtime=True, an extra timestamp parameter is given to
walkers, so walkers should be
`walker(walk, write, timestamp)`
:param maxtime: max execution time of this transaction
"""
@functools.wraps(asyncwalker)
async def _asyncupdater(last_info, container):
if last_info is not None:
from_walker, real_info = last_info
if not from_walker:
keys, orig_keys, walker = real_info
else:
r = await asyncwalker(real_info, container)
if r is None:
return None
keys, walker = r
orig_keys = keys
else:
r = await asyncwalker(None, container)
if r is None:
return None
keys, walker = r
orig_keys = keys
@functools.wraps(walker)
def _updater(keys, values, timestamp):
_stored_objs = dict(zip(keys, values))
if self.debuggingupdater:
_stored_old_values = {k: v.jsonencode()
for k,v in zip(keys, values)
if hasattr(v, 'jsonencode')}
# Keys written by walkers
_walker_write_dict = {}
_lost_keys = set()
_used_keys = set()
def _walk(key):
if key not in _stored_objs:
_lost_keys.add(key)
raise WalkKeyNotRetrieved(key)
else:
if key not in _walker_write_dict:
_used_keys.add(key)
return _stored_objs[key]
def _write(key, value):
_walker_write_dict[key] = value
_stored_objs[key] = value
try:
if withtime:
walker(_walk, _write, timestamp)
else:
walker(_walk, _write)
except AsyncTransactionLockException as e:
raise AsyncTransactionLockException((True, e.info))
if _lost_keys:
_lost_keys.update(_used_keys)
_lost_keys.update(orig_keys)
raise AsyncTransactionLockException((False, (_lost_keys, orig_keys, walker)))
if self.debuggingupdater:
# Check if there are changes not written
for k, v in _stored_old_values.items():
if k not in _walker_write_dict:
v2 = _stored_objs[k]
assert hasattr(v2, 'jsonencode') and v2.jsonencode() == v
if _walker_write_dict:
return tuple(zip(*_walker_write_dict.items()))
else:
return (), ()
return (_updater, keys)
return await self.asynctransact(_asyncupdater, True, maxtime=maxtime)
|
apache-2.0
| -3,423,474,057,507,028,500
| 50.19209
| 198
| 0.461097
| false
| 4.913774
| false
| false
| false
|
hhursev/recipe-scraper
|
recipe_scrapers/cucchiaio.py
|
1
|
1059
|
from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields
class Cucchiaio(AbstractScraper):
@classmethod
def host(cls):
return "cucchiaio.it"
def author(self):
return self.schema.author()
def title(self):
return self.schema.title()
def total_time(self):
block = self.soup.find("div", {"class": "scheda-ricetta-new"})
if block:
return sum(map(get_minutes, block.findAll("tr")))
return 0
def yields(self):
header = self.soup.find("td", text="PORZIONI")
if header:
value = header.find_next("td")
return get_yields(value)
return None
def image(self):
data = self.soup.find("div", {"class": "auto"}).find("img", {"class": "image"})
if data:
data = data.get("src")
return data
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return None
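# Illustrative usage sketch (not part of the original source): scrapers derived from
# AbstractScraper are normally obtained through the package's ``scrape_me`` helper;
# the URL below is a placeholder.
#
#     from recipe_scrapers import scrape_me
#
#     scraper = scrape_me("https://www.cucchiaio.it/ricetta/<some-recipe>")
#     print(scraper.title(), scraper.total_time(), scraper.yields())
#     print(scraper.ingredients())
#     print(scraper.instructions())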
|
mit
| 4,176,352,788,989,998,000
| 24.214286
| 87
| 0.581681
| false
| 3.702797
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/conversations/async_client.py
|
1
|
26721
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.services.conversations import pagers
from google.cloud.dialogflow_v2.types import conversation
from google.cloud.dialogflow_v2.types import conversation as gcd_conversation
from google.cloud.dialogflow_v2.types import participant
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import ConversationsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ConversationsGrpcAsyncIOTransport
from .client import ConversationsClient
class ConversationsAsyncClient:
"""Service for managing
[Conversations][google.cloud.dialogflow.v2.Conversation].
"""
_client: ConversationsClient
DEFAULT_ENDPOINT = ConversationsClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ConversationsClient.DEFAULT_MTLS_ENDPOINT
conversation_path = staticmethod(ConversationsClient.conversation_path)
parse_conversation_path = staticmethod(ConversationsClient.parse_conversation_path)
conversation_profile_path = staticmethod(ConversationsClient.conversation_profile_path)
parse_conversation_profile_path = staticmethod(ConversationsClient.parse_conversation_profile_path)
message_path = staticmethod(ConversationsClient.message_path)
parse_message_path = staticmethod(ConversationsClient.parse_message_path)
common_billing_account_path = staticmethod(ConversationsClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ConversationsClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ConversationsClient.common_folder_path)
parse_common_folder_path = staticmethod(ConversationsClient.parse_common_folder_path)
common_organization_path = staticmethod(ConversationsClient.common_organization_path)
parse_common_organization_path = staticmethod(ConversationsClient.parse_common_organization_path)
common_project_path = staticmethod(ConversationsClient.common_project_path)
parse_common_project_path = staticmethod(ConversationsClient.parse_common_project_path)
common_location_path = staticmethod(ConversationsClient.common_location_path)
parse_common_location_path = staticmethod(ConversationsClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversationsAsyncClient: The constructed client.
"""
return ConversationsClient.from_service_account_info.__func__(ConversationsAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversationsAsyncClient: The constructed client.
"""
return ConversationsClient.from_service_account_file.__func__(ConversationsAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversationsTransport:
"""Returns the transport used by the client instance.
Returns:
ConversationsTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ConversationsClient).get_transport_class, type(ConversationsClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ConversationsTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the conversations client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ConversationsTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ConversationsClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_conversation(self,
request: gcd_conversation.CreateConversationRequest = None,
*,
parent: str = None,
conversation: gcd_conversation.Conversation = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_conversation.Conversation:
r"""Creates a new conversation. Conversations are auto-completed
after 24 hours.
Conversation Lifecycle: There are two stages during a
conversation: Automated Agent Stage and Assist Stage.
For Automated Agent Stage, there will be a dialogflow agent
responding to user queries.
For Assist Stage, there's no dialogflow agent responding to user
queries. But we will provide suggestions which are generated
from conversation.
If
[Conversation.conversation_profile][google.cloud.dialogflow.v2.Conversation.conversation_profile]
is configured for a dialogflow agent, conversation will start
from ``Automated Agent Stage``, otherwise, it will start from
``Assist Stage``. And during ``Automated Agent Stage``, once an
[Intent][google.cloud.dialogflow.v2.Intent] with
[Intent.live_agent_handoff][google.cloud.dialogflow.v2.Intent.live_agent_handoff]
is triggered, conversation will transfer to Assist Stage.
Args:
request (:class:`google.cloud.dialogflow_v2.types.CreateConversationRequest`):
The request object. The request message for
[Conversations.CreateConversation][google.cloud.dialogflow.v2.Conversations.CreateConversation].
parent (:class:`str`):
Required. Resource identifier of the project creating
the conversation. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
conversation (:class:`google.cloud.dialogflow_v2.types.Conversation`):
Required. The conversation to create.
This corresponds to the ``conversation`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Conversation:
Represents a conversation.
A conversation is an interaction between
an agent, including live agents and
Dialogflow agents, and a support
customer. Conversations can include
phone calls and text-based chat
sessions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, conversation])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = gcd_conversation.CreateConversationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if conversation is not None:
request.conversation = conversation
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_conversation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_conversations(self,
request: conversation.ListConversationsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConversationsAsyncPager:
r"""Returns the list of all conversations in the
specified project.
Args:
request (:class:`google.cloud.dialogflow_v2.types.ListConversationsRequest`):
The request object. The request message for
[Conversations.ListConversations][google.cloud.dialogflow.v2.Conversations.ListConversations].
parent (:class:`str`):
Required. The project from which to list all
conversation. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.conversations.pagers.ListConversationsAsyncPager:
The response message for
[Conversations.ListConversations][google.cloud.dialogflow.v2.Conversations.ListConversations].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.ListConversationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_conversations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListConversationsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_conversation(self,
request: conversation.GetConversationRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversation.Conversation:
r"""Retrieves the specific conversation.
Args:
request (:class:`google.cloud.dialogflow_v2.types.GetConversationRequest`):
The request object. The request message for
[Conversations.GetConversation][google.cloud.dialogflow.v2.Conversations.GetConversation].
name (:class:`str`):
Required. The name of the conversation. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Conversation:
Represents a conversation.
A conversation is an interaction between
an agent, including live agents and
Dialogflow agents, and a support
customer. Conversations can include
phone calls and text-based chat
sessions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.GetConversationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_conversation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def complete_conversation(self,
request: conversation.CompleteConversationRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversation.Conversation:
r"""Completes the specified conversation. Finished
conversations are purged from the database after 30
days.
Args:
request (:class:`google.cloud.dialogflow_v2.types.CompleteConversationRequest`):
The request object. The request message for
[Conversations.CompleteConversation][google.cloud.dialogflow.v2.Conversations.CompleteConversation].
name (:class:`str`):
Required. Resource identifier of the conversation to
close. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Conversation:
Represents a conversation.
A conversation is an interaction between
an agent, including live agents and
Dialogflow agents, and a support
customer. Conversations can include
phone calls and text-based chat
sessions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.CompleteConversationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.complete_conversation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_messages(self,
request: conversation.ListMessagesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMessagesAsyncPager:
r"""Lists messages that belong to a given conversation. ``messages``
are ordered by ``create_time`` in descending order. To fetch
updates without duplication, send request with filter
``create_time_epoch_microseconds > [first item's create_time of previous request]``
and empty page_token.
Args:
request (:class:`google.cloud.dialogflow_v2.types.ListMessagesRequest`):
The request object. The request message for
[Conversations.ListMessages][google.cloud.dialogflow.v2.Conversations.ListMessages].
parent (:class:`str`):
Required. The name of the conversation to list messages
for. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.conversations.pagers.ListMessagesAsyncPager:
The response message for
[Conversations.ListMessages][google.cloud.dialogflow.v2.Conversations.ListMessages].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.ListMessagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_messages,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMessagesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ConversationsAsyncClient",
)
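# Illustrative usage sketch (not part of the original source): a minimal call against
# this async client. The project, location and conversation profile resource names are
# placeholders; credentials are resolved from the environment as described in __init__.
#
#     import asyncio
#     from google.cloud import dialogflow_v2
#
#     async def main():
#         client = dialogflow_v2.ConversationsAsyncClient()
#         conversation = dialogflow_v2.Conversation(
#             conversation_profile="<conversation profile resource name>",
#         )
#         response = await client.create_conversation(
#             parent="projects/<project>/locations/<location>",
#             conversation=conversation,
#         )
#         print(response.name)
#
#     asyncio.run(main())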
|
apache-2.0
| -8,202,350,069,784,333,000
| 41.7536
| 138
| 0.6258
| false
| 4.711867
| false
| false
| false
|