repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ryanss/holidays.py | holidays/countries/ethiopia.py | Python | mit | 5,162 | 0 | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from calendar import isleap
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import SAT, SUN
from holidays.constants import JAN, MAR, MAY, SEP
from holidays.holiday_base import HolidayBase
from holidays.utils import islamic_to_gre
WEEKEND = (SAT, SUN)
# Ethiopian holidays are estimated: it is common for the day to be pushed
# if falls in a weekend, although not a rule that can be implemented.
# Holidays after 2020: the following four moving date holidays whose exact
# date is announced yearly are estimated (and so denoted):
# - Eid El Fetr*
# - Eid El Adha*
# - Ara | fat Day*
# - Moulad El Naby*
# | *only if hijri-converter library is installed, otherwise a warning is
# raised that this holiday is missing. hijri-converter requires
# Python >= 3.6
# is_weekend function is there, however not activated for accuracy.
class Ethiopia(HolidayBase):
country = "ET"
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
# The Ethiopian New Year is called Kudus Yohannes in Ge'ez and
# Tigrinya, while in Amharic,
# the official language of Ethiopia it is called Enkutatash.
# It occurs on September 11 in the Gregorian Calendar;
# except for the year preceding a leap year, when it occurs on
# September 12.
if self.ethiopian_isleap(year):
self[date(year, SEP, 12)] = "አዲስ ዓመት እንቁጣጣሽ/Ethiopian New Year"
else:
self[date(year, SEP, 11)] = "አዲስ ዓመት እንቁጣጣሽ/Ethiopian New Year"
# Finding of true cross
if self.ethiopian_isleap(year):
self[date(year, SEP, 28)] = "መስቀል/Finding of True Cross"
else:
self[date(year, SEP, 27)] = "መስቀል/Finding of True Cross"
# Ethiopian Christmas
self[date(year, JAN, 7)] = "ገና/Ethiopian X-Mas"
# Ethiopian Ephiphany
self[date(year, JAN, 19)] = "ጥምቀት/Ephiphany"
# Ethiopian Good Friday
self[easter(year, 2) - rd(days=2)] = "ስቅለት/Ethiopian Good Friday"
# Ethiopian Easter - Orthodox Easter
self[easter(year, 2)] = "ፋሲካ/Ethiopian Easter"
# Adwa Victory Day
if year > 1896:
self[date(year, MAR, 2)] = "አድዋ/Victory of Adwa"
# Labour Day
self[date(year, MAY, 1)] = "የሰራተኞች ቀን/Labor Day"
# Patriots Day
if year > 1941:
self[date(year, MAY, 5)] = "የአርበኞች ቀን/Patriots Day"
# Derg Downfall Day
if year > 1991:
self[
date(year, MAY, 28)
] = "ደርግ የወደቀበት ቀን/Downfall of Dergue regime"
# Downfall of King. Hailesilassie
if year < 1991 and year > 1974:
if self.ethiopian_isleap(year):
self[date(year, SEP, 13)] = "ደርግ የመጣበት ቀን/Formation of Dergue"
else:
self[date(year, SEP, 12)] = "ደርግ የመጣበት ቀን/Formation of Dergue"
# Eid al-Fitr - Feast Festive
# date of observance is announced yearly, This is an estimate since
# having the Holiday on Weekend does change the number of days,
# deceided to leave it since marking a Weekend as a holiday
# wouldn't do much harm.
for date_obs in islamic_to_gre(year, 10, 1):
hol_date = date_obs
self[hol_date] = "ኢድ አልፈጥር/Eid-Al-Fitr"
# Eid al-Adha - Scarfice Festive
# date of observance is announced yearly
for date_obs in islamic_to_gre(year, 12, 9):
hol_date = date_obs
self[hol_date + rd(days=1)] = "አረፋ/Eid-Al-Adha"
# Prophet Muhammad's Birthday - (hijari_year, 3, 12)
for date_obs in islamic_to_gre(year, 3, 12):
hol_date = date_obs
self[hol_date + rd(days=1)] = "መውሊድ/Prophet Muhammad's Birthday"
# Ethiopian leap years are coincident with leap years in the Gregorian
# calendar until the end of February 2100. It starts earlier from new year
# of western calendar.
# Ethiopian leap year starts on Sep 11, so it has an effect on
# holidays between Sep 11 and Jan 1. Therefore, here on the following
# function we intentionally add 1 to the leap year to offset the difference
def ethiopian_isleap(self, year):
return isleap(year + 1)
class ET(Ethiopia):
pass
class ETH(Ethiopia):
pass
|
danbar/qr_decomposition | qr_decomposition/tests/test_householder_reflection.py | Python | mit | 1,155 | 0 | """
Python unit-test
"""
import unittest
import numpy as np
import numpy.testing as npt
from .. import qr_decomposition
class TestHouseholderReflection(unittest.TestCase):
"""Test case for QR decomposition using Householder reflection."""
def test_wikipedia_example1( | self):
"""Test of Wikipedia example
The example for the foll | owing QR decomposition is taken from
https://en.wikipedia.org/wiki/Qr_decomposition#Example_2.
"""
A = np.array([[12, -51, 4],
[6, 167, -68],
[-4, 24, -41]], dtype=np.float64)
(Q, R) = qr_decomposition.householder_reflection(A)
Q_desired = np.array([[0.8571, -0.3943, 0.3314],
[0.4286, 0.9029, -0.0343],
[-0.2857, 0.1714, 0.9429]], dtype=np.float64)
R_desired = np.array([[14, 21, -14],
[0, 175, -70],
[0, 0, -35]], dtype=np.float64)
npt.assert_almost_equal(Q, Q_desired, 4)
npt.assert_almost_equal(R, R_desired, 4)
if __name__ == "__main__":
unittest.main()
|
mschurenko/ansible-modules-core | cloud/amazon/rds.py | Python | gpl-3.0 | 40,638 | 0.005807 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
| version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only rep | lica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take.
required: true
default: null
aliases: []
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
aliases: []
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
aliases: []
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
aliases: []
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
aliases: []
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
aliases: []
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
aliases: []
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
aliases: []
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
aliases: []
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
aliases: []
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
aliases: []
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
aliases: []
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
aliases: []
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
aliases: []
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
aliases: []
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
aliases: []
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
aliases: []
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
aliases: []
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
aliases: []
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
aliases: []
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
aliases: []
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how lon |
nickoala/telepot | telepot/aio/api.py | Python | mit | 5,062 | 0.003951 | import asyncio
import aiohttp
import async_timeout
import atexit
import re
import json
from .. import exception
from ..api import _methodurl, _which_pool, _fileurl, _guess_filename
_loop = asyncio.get_event_loop()
_pools = {
'default': aiohttp.ClientSession(
connector=aiohttp.TCPConnector(limit=10),
loop=_loop)
}
_timeout = 30
_proxy = None # (url, (username, password))
def set_proxy(url, basic_auth=None):
global _proxy
if not url:
_proxy = None
else:
_proxy = (url, basic_auth) if basic_auth else (url,)
def _proxy_kwargs():
if _proxy is None or len(_proxy) == 0:
return {}
elif len(_proxy) == 1:
return {'proxy': _proxy[0]}
elif len(_proxy) == 2:
return {'proxy': _proxy[0], 'proxy_auth': aiohttp.BasicAuth(*_proxy[1] | )}
else:
raise RuntimeError("_proxy has invalid length")
async def _close_pools():
global _pools
for s in _pools.values():
await s.close()
atexit.register(lambda: _loop.create_task(_close_pools())) # have to wrap async function
def _create_onetime_pool():
return aiohttp.ClientSession(
connector=aiohttp.TCPConnector(limit=1, force_close=True),
loop=_loop)
def _default_timeout(req, **user_kw):
return _timeout
def _compose_timeo | ut(req, **user_kw):
token, method, params, files = req
if method == 'getUpdates' and params and 'timeout' in params:
# Ensure HTTP timeout is longer than getUpdates timeout
return params['timeout'] + _default_timeout(req, **user_kw)
elif files:
# Disable timeout if uploading files. For some reason, the larger the file,
# the longer it takes for the server to respond (after upload is finished).
# It is unclear how long timeout should be.
return None
else:
return _default_timeout(req, **user_kw)
def _compose_data(req, **user_kw):
token, method, params, files = req
data = aiohttp.FormData()
if params:
for key,value in params.items():
data.add_field(key, str(value))
if files:
for key,f in files.items():
if isinstance(f, tuple):
if len(f) == 2:
filename, fileobj = f
else:
raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
else:
filename, fileobj = _guess_filename(f) or key, f
data.add_field(key, fileobj, filename=filename)
return data
def _transform(req, **user_kw):
timeout = _compose_timeout(req, **user_kw)
data = _compose_data(req, **user_kw)
url = _methodurl(req, **user_kw)
name = _which_pool(req, **user_kw)
if name is None:
session = _create_onetime_pool()
cleanup = session.close # one-time session: remember to close
else:
session = _pools[name]
cleanup = None # reuse: do not close
kwargs = {'data':data}
kwargs.update(user_kw)
return session.post, (url,), kwargs, timeout, cleanup
async def _parse(response):
try:
data = await response.json()
if data is None:
raise ValueError()
except (ValueError, json.JSONDecodeError, aiohttp.ClientResponseError):
text = await response.text()
raise exception.BadHTTPResponse(response.status, text, response)
if data['ok']:
return data['result']
else:
description, error_code = data['description'], data['error_code']
# Look for specific error ...
for e in exception.TelegramError.__subclasses__():
n = len(e.DESCRIPTION_PATTERNS)
if any(map(re.search, e.DESCRIPTION_PATTERNS, n*[description], n*[re.IGNORECASE])):
raise e(description, error_code, data)
# ... or raise generic error
raise exception.TelegramError(description, error_code, data)
async def request(req, **user_kw):
fn, args, kwargs, timeout, cleanup = _transform(req, **user_kw)
kwargs.update(_proxy_kwargs())
try:
if timeout is None:
async with fn(*args, **kwargs) as r:
return await _parse(r)
else:
try:
with async_timeout.timeout(timeout):
async with fn(*args, **kwargs) as r:
return await _parse(r)
except asyncio.TimeoutError:
raise exception.TelegramError('Response timeout', 504, {})
except aiohttp.ClientConnectionError:
raise exception.TelegramError('Connection Error', 400, {})
finally:
if cleanup: # e.g. closing one-time session
if asyncio.iscoroutinefunction(cleanup):
await cleanup()
else:
cleanup()
def download(req):
session = _create_onetime_pool()
kwargs = {}
kwargs.update(_proxy_kwargs())
return session, session.get(_fileurl(req), timeout=_timeout, **kwargs)
# Caller should close session after download is complete
|
francislpx/myblog | blog/admin.py | Python | gpl-3.0 | 297 | 0.003367 | from django.contrib import admin
from .models import Post, Category, Tag
class PostAdmin(admin.ModelAdmin):
list_disp | lay = ['title', 'create_time', 'modified_time', 'category', 'author', 'views']
admin.site.register(Post, PostAdmin)
admin.site.regi | ster(Category)
admin.site.register(Tag)
|
atumanov/ray | python/ray/rllib/models/model.py | Python | apache-2.0 | 10,512 | 0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import gym
from ray.rllib.models.misc import linear, normc_initializer
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@PublicAPI
class Model(object):
"""Defines an abstract network model for use with RLlib.
Models convert input tensors to a number of output features. These features
can then be interpreted by ActionDistribution classes to determine
e.g. agent action values.
The last layer of the network can also be retrieved if the algorithm
needs to further post-processing (e.g. Actor and Critic networks in A3C).
Attributes:
input_dict (dict): Dictionary of input tensors, including "obs",
"prev_action", "prev_reward", "is_training".
outputs (Tensor): The output vector of this model, of shape
[BATCH_SIZE, num_outputs].
last_layer (Tensor): The feature layer right before the model output,
of shape [BATCH_SIZE, f].
state_init (list): List of initial recurrent state tensors (if any).
state_in (list): List of input recurrent state tensors (if any).
state_out (list): List of output recurrent state tensors (if any).
seq_lens (Tensor): The tensor input for RNN sequence lengths. This
defaults to a Tensor of [1] * len(batch) in the non-RNN case.
If `options["free_log_std"]` is True, the last half of the
output layer will be free variables that are not dependent on
inputs. This is often used if the output of the network is used
to parametrize a probability distribution. In this case, the
first half of the parameters can be interpreted as a location
parameter (like a mean) and the second half can be interpreted as
a scale parameter (like a standard deviation).
"""
def __init__(self,
input_dict,
obs_space,
action_space,
num_outputs,
options,
state_in=None,
seq_lens=None):
assert isinstance(input_dict, dict), input_dict
| # Default attribute values for the non-RNN case
self.state_init = []
self.state_in = state_in or []
self.state_out = []
self.obs_spac | e = obs_space
self.action_space = action_space
self.num_outputs = num_outputs
self.options = options
self.scope = tf.get_variable_scope()
self.session = tf.get_default_session()
if seq_lens is not None:
self.seq_lens = seq_lens
else:
self.seq_lens = tf.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens")
self._num_outputs = num_outputs
if options.get("free_log_std"):
assert num_outputs % 2 == 0
num_outputs = num_outputs // 2
try:
restored = input_dict.copy()
restored["obs"] = restore_original_dimensions(
input_dict["obs"], obs_space)
self.outputs, self.last_layer = self._build_layers_v2(
restored, num_outputs, options)
except NotImplementedError:
self.outputs, self.last_layer = self._build_layers(
input_dict["obs"], num_outputs, options)
if options.get("free_log_std", False):
log_std = tf.get_variable(
name="log_std",
shape=[num_outputs],
initializer=tf.zeros_initializer)
self.outputs = tf.concat(
[self.outputs, 0.0 * self.outputs + log_std], 1)
def _build_layers(self, inputs, num_outputs, options):
"""Builds and returns the output and last layer of the network.
Deprecated: use _build_layers_v2 instead, which has better support
for dict and tuple spaces.
"""
raise NotImplementedError
@PublicAPI
def _build_layers_v2(self, input_dict, num_outputs, options):
"""Define the layers of a custom model.
Arguments:
input_dict (dict): Dictionary of input tensors, including "obs",
"prev_action", "prev_reward", "is_training".
num_outputs (int): Output tensor must be of size
[BATCH_SIZE, num_outputs].
options (dict): Model options.
Returns:
(outputs, feature_layer): Tensors of size [BATCH_SIZE, num_outputs]
and [BATCH_SIZE, desired_feature_size].
When using dict or tuple observation spaces, you can access
the nested sub-observation batches here as well:
Examples:
>>> print(input_dict)
{'prev_actions': <tf.Tensor shape=(?,) dtype=int64>,
'prev_rewards': <tf.Tensor shape=(?,) dtype=float32>,
'is_training': <tf.Tensor shape=(), dtype=bool>,
'obs': OrderedDict([
('sensors', OrderedDict([
('front_cam', [
<tf.Tensor shape=(?, 10, 10, 3) dtype=float32>,
<tf.Tensor shape=(?, 10, 10, 3) dtype=float32>]),
('position', <tf.Tensor shape=(?, 3) dtype=float32>),
('velocity', <tf.Tensor shape=(?, 3) dtype=float32>)]))])}
"""
raise NotImplementedError
@PublicAPI
def value_function(self):
"""Builds the value function output.
This method can be overridden to customize the implementation of the
value function (e.g., not sharing hidden layers).
Returns:
Tensor of size [BATCH_SIZE] for the value function.
"""
return tf.reshape(
linear(self.last_layer, 1, "value", normc_initializer(1.0)), [-1])
@PublicAPI
def custom_loss(self, policy_loss, loss_inputs):
"""Override to customize the loss function used to optimize this model.
This can be used to incorporate self-supervised losses (by defining
a loss over existing input and output tensors of this model), and
supervised losses (by defining losses over a variable-sharing copy of
this model's layers).
You can find an runnable example in examples/custom_loss.py.
Arguments:
policy_loss (Tensor): scalar policy loss from the policy.
loss_inputs (dict): map of input placeholders for rollout data.
Returns:
Scalar tensor for the customized loss for this model.
"""
if self.loss() is not None:
raise DeprecationWarning(
"self.loss() is deprecated, use self.custom_loss() instead.")
return policy_loss
@PublicAPI
def custom_stats(self):
"""Override to return custom metrics from your model.
The stats will be reported as part of the learner stats, i.e.,
info:
learner:
model:
key1: metric1
key2: metric2
Returns:
Dict of string keys to scalar tensors.
"""
return {}
def loss(self):
"""Deprecated: use self.custom_loss()."""
return None
def _validate_output_shape(self):
"""Checks that the model has the correct number of outputs."""
try:
out = tf.convert_to_tensor(self.outputs)
shape = out.shape.as_list()
except Exception:
raise ValueError("Output is not a tensor: {}".format(self.outputs))
else:
if len(shape) != 2 or shape[1] != self._num_outputs:
raise ValueError(
"Expected output shape of [None, {}], got {}".format(
self._num_outputs, shape))
@DeveloperAPI
def restore_original_dimensions(obs, obs_space, tensorlib=tf):
"""Unpacks Dict and Tuple space observations into their original form.
This is needed since we flatten Dict and Tuple observations in transit.
Bef |
pld/bamboo | bamboo/tests/controllers/test_datasets_update_with_aggs.py | Python | bsd-3-clause | 2,876 | 0 | import simplejson as json
from bamboo.models.dat | aset import Dataset
from bamboo.tests.controllers.test_abstract_datasets_update import\
TestAbstractDatasetsUpdate
class TestDatasetsUpdateWithAggs(TestAbstractDatasetsUpdate):
def setUp(self):
TestAbstractDatasetsUpdate.setUp(self)
self._create_original_datasets()
self._add_common_calculations()
# create linked datasets
aggregations = {
'max(amount)': 'max of amount',
'mean(amount)': 'mean of amount',
'medi | an(amount)': 'median of amount',
'min(amount)': 'min of amount',
'ratio(amount, gps_latitude)': 'ratio of amount and gps_latitude',
'sum(amount)': 'sum of amount',
}
for aggregation, name in aggregations.items():
self.calculations.create(
self.dataset2_id, aggregation, name)
# and with group
for aggregation, name in aggregations.items():
self.calculations.create(
self.dataset2_id, aggregation, name, group='food_type')
result = json.loads(
self.controller.aggregations(self.dataset2_id))
self.linked_dataset1_id = result['']
# create merged datasets
result = json.loads(self.controller.merge(dataset_ids=json.dumps(
[self.dataset1_id, self.dataset2_id])))
self.merged_dataset1_id = result[Dataset.ID]
result = json.loads(self.controller.merge(dataset_ids=json.dumps(
[self.merged_dataset1_id, self.linked_dataset1_id])))
self.merged_dataset2_id = result[Dataset.ID]
def test_setup_datasets(self):
self._verify_dataset(
self.dataset1_id,
'updates_with_aggs/originals/dataset1.pkl')
self._verify_dataset(
self.dataset2_id,
'updates_with_aggs/originals/dataset2.pkl')
self._verify_dataset(
self.linked_dataset1_id,
'updates_with_aggs/originals/linked_dataset1.pkl')
self._verify_dataset(
self.merged_dataset1_id,
'updates_with_aggs/originals/merged_dataset1.pkl')
self._verify_dataset(
self.merged_dataset2_id,
'updates_with_aggs/originals/merged_dataset2.pkl')
def test_datasets_update(self):
self._put_row_updates(self.dataset2_id)
self._verify_dataset(
self.dataset2_id,
'updates_with_aggs/update/dataset2.pkl')
self._verify_dataset(
self.merged_dataset1_id,
'updates_with_aggs/update/merged_dataset1.pkl')
self._verify_dataset(
self.linked_dataset1_id,
'updates_with_aggs/update/linked_dataset1.pkl')
self._verify_dataset(
self.merged_dataset2_id,
'updates_with_aggs/update/merged_dataset2.pkl')
|
sebasmagri/mezzanine_polls | mezzanine_polls/admin.py | Python | bsd-2-clause | 271 | 0.00369 | from django.contrib import a | dmin
from mezzanine.pages.admin import PageAdmin
from .models import Poll, Choice
class ChoiceInline(admin.TabularInline):
model = | Choice
class PollAdmin(PageAdmin):
inlines = (ChoiceInline, )
admin.site.register(Poll, PollAdmin)
|
google-research/google-research | reset_free_learning/reset_free.py | Python | apache-2.0 | 43,220 | 0.004766 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
r"""Train and Eval SAC.
"""
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
from six.moves import range
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.agents.ddpg import actor_network
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
from tf_agents.utils import nest_utils
from reset_free_learning import state_distribution_distance
from reset_free_learning.agents_with_value_functions import SacAgent
from reset_free_learning.agents_with_value_functions import Td3Agent
from reset_free_learning.envs import reset_free_wrapper
from reset_free_learning.reset_goal_generator import FixedResetGoal
from reset_free_learning.reset_goal_generator import ResetGoalGenerator
from reset_free_learning.reset_goal_generator import ScheduledResetGoal
from reset_free_learning.utils.env_utils import get_env
from reset_free_learning.utils.other_utils import copy_replay_buffer
from reset_free_learning.utils.other_utils import np_custom_load
from reset_free_learning.utils.other_utils import np_custom_save
from reset_free_learning.utils.other_utils import record_video
from reset_free_learning.utils.other_utils import std_clip_transform
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('offline_dir', None,
'Directory for loading replay buffers.')
flags.DEFINE_integer(
'max_episode_steps', 1000,
'maximum number of steps in the environment before a full reset is done')
flags.DEFINE_integer('eval_episode_steps', 1000,
'maximum number of steps in the evaluation episode')
flags.DEFINE_string('env_name', 'sawyer_push',
'name of the environment to be loaded')
flags.DEFINE_integer('random_seed', None, 'random seed')
flags.DEFINE_integer('video_record_interval', 50000, 'video interval')
flags.DEFINE_integer('num_videos_per_interval', 0,
'number of videos recording in each video evaluation')
flags.DEFINE_string('reward_type', 'dense', 'reward type for the environment')
# env reset-wrapper properties
flags.DEFINE_integer(
'num_success_states', 1,
'Number of successful states in a sequence for the reset to be considered complete'
)
flags.DEFINE_integer(
'variable_reset_horizon', 0,
'Number of successful states in a sequence for the reset to be considered complete'
)
# train hyperparameters
flags.DEFINE_integer('num_iterations', 3000000, 'number of iterations')
flags.DEFINE_integer(
'reset_goal_frequency', 400,
'virtual episode size, only the goal/task is reset, that is no human intervention is required'
)
flags.DEFINE_integer('initial_collect_steps', 10000, 'number of iterations')
flags.DEFINE_integer('batch_size', 256,
'Batch size for updating agent from replay buffer')
flags.DEFINE_integer('collect_steps_per_iteration', 1,
'number of steps collected per iteration')
flags.DEFINE_integer('train_steps_per_iteration', 1,
' number of train steps per iteration')
# agent hyperparameters
flags.DEFINE_string('agent_type', 'sac', 'type of agent to use for training')
flags.DEFINE_list('actor_net_size', [256, 256], 'layer size values of actor')
flags.DEFINE_list('critic_net_size', [256, 256],
'layer size values for the critic')
flags.DEFINE_float('reward_scale_factor', 0.1, 'reward scale factor, alpha')
flags.DEFINE_float('actor_learning_rate', 3e-4, 'learning rate for the actor')
flags.DEFINE_float('critic_learning_rate', 3e-4, 'learning rate for the critic')
flags.DEFINE_float('discount_factor', 0.99,
'discount factor for the reward optimization')
flags.DEFINE_integer('replay_buffer_capacity', int(1e6),
'capacity of the replay buffer')
# TD3 hyperparameters
flags.DEFINE_float('exploration_noise_std', 0.1, 'exploration noise')
# SAC hyperparmater
flags.DEFINE_float('alpha_learning_rate', 3e-4,
'learning rate for the soft policy parameter')
# reset-free hyperparameters
flags.DEFINE_integer(
'use_reset_goals', 1,
"""-1-> oracle-reset, terminate on goal success
0-> reset-free, no reset goals
1-> reset-free, use reset goals, reset goals can be variable or fixed
2-> oracle-reset, teleport to reset goal
3-> reset-free, goal termination of episodes, reset goals can be variable or learned
4-> reset-free wrapper with goal termination, scheduled reset goals
5-> oracle-reset, goal termination of episodes, scheduled reset goals
6-> oracle-reset, the forward goal is variable (do not use)
7-> oracle-reset, reset goal is variable
""")
flags.DEFINE_integer(
'num_action_samples', 1,
'used for approximating the value function from the critic function')
flags.DEFINE_integer('num_reset_candidates', 1000,
'number of candidate states for reset')
flags.DEFINE_float('reset_lagrange_learning_rate', 3e-4,
'learning rate for the lagrange_multiplier')
flags.DEFINE_float('value_threshold', 1000, 'value threshold')
flags.DEFINE_float('lagrange_max', np.inf, 'upper bound for lagrange variable')
flags.DEFINE_integer('use_minimum', 1,
'the choice of value function used in reverse curriculum')
flags.DEFINE_integer('use_no_entropy_q', 0,
'use the other q function to select reset goals')
# relabel data
flags.DEFINE_integer('relabel_goals', 0,
'add goal relabelling for optimiza | tion')
flags.DEFINE_integer('goal_relabel_type', 0, '0->final, 1-> random future')
flags.DEFINE_integer( |
'num_relabelled_goals', 5,
'number of relabelled goals per episode, use with random future goal relabelling'
)
flags.DEFINE_integer('relabel_offline_data', 0,
'relabel with every intermediate state as a goal')
# point mass environment properties
flags.DEFINE_string('point_mass_env_type', 'default',
'environment configuration')
flags.DEFINE_string('playpen_task', 'rc_o', 'environment configuration')
flags.DEFINE_integer('reset_at_goal', 0,
'initialize reset-free environment at the goal')
# debug
flags.DEFINE_integer('debug_value_fn_for_reset', 0,
'print lagrange variane of reset goal')
flags.DEFINE_integer('num_chunks', 10,
'number of chunks to split the offline trajectory')
flags.DEFINE_integer('num_success_for_switch', 10,
'number of success to switch to next goal')
FLAGS = flags.FLAGS
def relabel_function(cur_episode, last_step, reward_fn, full_buffer):
all_data = cur_episode.gather_all()
# add all actual interaction to the replay buffer
all_data = nest_utils.unbatch_nested_tensors(all_data)
for cur_trajectory in nest_utils.u |
ESOedX/edx-platform | lms/djangoapps/program_enrollments/tests/factories.py | Python | agpl-3.0 | 1,400 | 0 | """
Factories for Program Enrollment tests.
"""
from __future__ import absolute_import
from uuid import uuid4
| import factory
from factory.django import DjangoModelFactory
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.program_enrollments import models
from student.tests.factories impor | t CourseEnrollmentFactory, UserFactory
class ProgramEnrollmentFactory(DjangoModelFactory):
    """ A Factory for the ProgramEnrollment model. """
    class Meta(object):
        model = models.ProgramEnrollment
    # A fresh user is created for every enrollment built.
    user = factory.SubFactory(UserFactory)
    # No organization-provided external key by default.
    external_user_key = None
    # LazyFunction is re-evaluated per instance, so each enrollment gets
    # distinct program/curriculum UUIDs.
    program_uuid = factory.LazyFunction(uuid4)
    curriculum_uuid = factory.LazyFunction(uuid4)
    status = 'enrolled'
PROGRAM_COURSE_ENROLLMENT_DEFAULT_COURSE_KEY = (
CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
)
class ProgramCourseEnrollmentFactory(DjangoModelFactory):
    """ A factory for the ProgramCourseEnrollment model. """
    class Meta(object):
        model = models.ProgramCourseEnrollment
    program_enrollment = factory.SubFactory(ProgramEnrollmentFactory)
    course_enrollment = factory.SubFactory(CourseEnrollmentFactory)
    # Derive the course key from the related course enrollment when one is
    # present; otherwise fall back to the module-level default demo key.
    course_key = factory.LazyAttribute(
        lambda pce: (
            pce.course_enrollment.course_id
            if pce.course_enrollment
            else PROGRAM_COURSE_ENROLLMENT_DEFAULT_COURSE_KEY
        )
    )
    status = 'active'
|
matrix-org/synapse | synapse/handlers/room.py | Python | apache-2.0 | 64,472 | 0.001411 | # Copyright 2016-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions for performing actions on rooms."""
import itertools
import logging
import math
impor | t random
import string
from collections import OrderedDict
from ty | ping import (
TYPE_CHECKING,
Any,
Awaitable,
Collection,
Dict,
List,
Optional,
Tuple,
)
import attr
from typing_extensions import TypedDict
from synapse.api.constants import (
EventContentFields,
EventTypes,
GuestAccess,
HistoryVisibility,
JoinRules,
Membership,
RoomCreationPreset,
RoomEncryptionAlgorithms,
RoomTypes,
)
from synapse.api.errors import (
AuthError,
Codes,
HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
SynapseError,
)
from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_power_levels_contents
from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.federation import get_domains_from_state
from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.databases.main.relations import BundledAggregations
from synapse.storage.state import StateFilter
from synapse.streams import EventSource
from synapse.types import (
JsonDict,
MutableStateMap,
Requester,
RoomAlias,
RoomID,
RoomStreamToken,
StateMap,
StreamToken,
UserID,
create_requester,
)
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
id_server_scheme = "https://"
FIVE_MINUTES_IN_MS = 5 * 60 * 1000
@attr.s(slots=True, frozen=True, auto_attribs=True)
class EventContext:
    """Immutable bundle of an event plus the room context around it."""
    # Events either side of `event` in the returned timeline window.
    events_before: List[EventBase]
    event: EventBase
    events_after: List[EventBase]
    # State events for the room -- the exact snapshot point is not visible
    # here; presumably the state at the end of the window (confirm in caller).
    state: List[EventBase]
    # Bundled aggregations, presumably keyed by event ID -- verify against
    # how callers populate this map.
    aggregations: Dict[str, BundledAggregations]
    # Stream tokens (serialised as strings) bounding the window.
    start: str
    end: str
class RoomCreationHandler:
    def __init__(self, hs: "HomeServer"):
        """Wire up the handler's dependencies from the HomeServer container.

        Also derives the per-preset room-state defaults (including whether
        encryption is on by default) from the homeserver configuration.
        """
        self.store = hs.get_datastores().main
        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.hs = hs
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        self.room_member_handler = hs.get_room_member_handler()
        self._event_auth_handler = hs.get_event_auth_handler()
        self.config = hs.config
        self.request_ratelimiter = hs.get_request_ratelimiter()
        # Room state based off defined presets
        self._presets_dict: Dict[str, Dict[str, Any]] = {
            RoomCreationPreset.PRIVATE_CHAT: {
                "join_rules": JoinRules.INVITE,
                "history_visibility": HistoryVisibility.SHARED,
                "original_invitees_have_ops": False,
                "guest_can_join": True,
                "power_level_content_override": {"invite": 0},
            },
            RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
                "join_rules": JoinRules.INVITE,
                "history_visibility": HistoryVisibility.SHARED,
                "original_invitees_have_ops": True,
                "guest_can_join": True,
                "power_level_content_override": {"invite": 0},
            },
            RoomCreationPreset.PUBLIC_CHAT: {
                "join_rules": JoinRules.PUBLIC,
                "history_visibility": HistoryVisibility.SHARED,
                "original_invitees_have_ops": False,
                "guest_can_join": False,
                "power_level_content_override": {},
            },
        }
        # Modify presets to selectively enable encryption by default per homeserver config
        for preset_name, preset_config in self._presets_dict.items():
            encrypted = (
                preset_name
                in self.config.room.encryption_enabled_by_default_for_room_presets
            )
            preset_config["encrypted"] = encrypted
        self._replication = hs.get_replication_data_handler()
        # linearizer to stop two upgrades happening at once
        self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
        # If a user tries to update the same room multiple times in quick
        # succession, only process the first attempt and return its result to
        # subsequent requests
        self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
            hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
        )
        self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
        self.third_party_event_rules = hs.get_third_party_event_rules()
async def upgrade_room(
self, requester: Requester, old_room_id: str, new_version: RoomVersion
) -> str:
"""Replace a room with a new room with a different version
Args:
requester: the user requesting the upgrade
old_room_id: the id of the room to be replaced
new_version: the new room version to use
Returns:
the new room id
Raises:
ShadowBanError if the requester is shadow-banned.
"""
await self.request_ratelimiter.ratelimit(requester)
user_id = requester.user.to_string()
# Check if this room is already being upgraded by another person
for key in self._upgrade_response_cache.keys():
if key[0] == old_room_id and key[1] != user_id:
# Two different people are trying to upgrade the same room.
# Send the second an error.
#
# Note that this of course only gets caught if both users are
# on the same homeserver.
raise SynapseError(
400, "An upgrade for this room is currently in progress"
)
# Upgrade the room
#
# If this user has sent multiple upgrade requests for the same room
# and one of them is not complete yet, cache the response and
# return it to all subsequent requests
ret = await self._upgrade_response_cache.wrap(
(old_room_id, user_id),
self._upgrade_room,
requester,
old_room_id,
new_version, # args for _upgrade_room
)
return ret
async def _upgrade_room(
self, requester: Requester, old_room_id: str, new_version: RoomVersion
) -> str:
"""
Args:
requester: the user requesting the upgrade
old_room_id: the id of the room to be replaced
new_versions: the version to upgrade the room to
Raises:
ShadowBanError if the requester is shadow-banned.
"""
user_id = requester.user.to_string()
assert self.hs.is_mine_id(user_id), "User must be our own: %s" % (user_id,)
# start by allocating a new room id
r = await self.store.get_room(old_room_id)
if r is None:
raise NotFoundError("Unknown room id %s" % (old_room_id,))
new_room_id = await self._generate_room_id(
creator_id=user_id,
is_public=r["is_public"],
room_version=new_version,
)
logger.info("Creati |
pernici/sympy | sympy/functions/special/tests/test_spec_polynomials.py | Python | bsd-3-clause | 3,248 | 0.009544 | from sympy import (legendre, Symbol, hermite, chebyshevu, chebyshevt,
chebyshevt_root, chebyshevu_root, assoc_legendre, Rational,
roots, sympify, S, laguerre_l, laguerre_poly)
x = Symbol('x')
def test_legendre():
    """Spot-check Legendre polynomials P_n(x): closed forms, endpoint values,
    behaviour at 0, and the closed-form roots of P_4."""
    assert legendre(0, x) == 1
    assert legendre(1, x) == x
    assert legendre(2, x) == ((3*x**2-1)/2).expand()
    assert legendre(3, x) == ((5*x**3-3*x)/2).expand()
    assert legendre(4, x) == ((35*x**4-30*x**2+3)/8).expand()
    assert legendre(5, x) == ((63*x**5-70*x**3+15*x)/8).expand()
    assert legendre(6, x) == ((231*x**6-315*x**4+105*x**2-5)/16).expand()
    # Endpoint values: P_n(1) == 1 and P_n(-1) == (-1)**n.
    assert legendre(10, -1) == 1
    assert legendre(11, -1) == -1
    assert legendre(10, 1) == 1
    assert legendre(11, 1) == 1
    # Odd-degree polynomials vanish at the origin; even-degree ones do not.
    assert legendre(10, 0) != 0
    assert legendre(11, 0) == 0
    # The four simple roots of P_4, each with multiplicity 1.
    assert roots(legendre(4,x), x) == {
        (Rational(3, 7) - Rational(2, 35)*30**S.Half)**S.Half: 1,
        -(Rational(3, 7) - Rational(2, 35)*30**S.Half)**S.Half: 1,
        (Rational(3, 7) + Rational(2, 35)*30**S.Half)**S.Half: 1,
        -(Rational(3, 7) + Rational(2, 35)*30**S.Half)**S.Half: 1,
    }
def test_assoc_legendre():
    """Associated Legendre functions P_l^m for small l, m, including the
    proportionality relations for negative m."""
    Plm=assoc_legendre
    # Q = sqrt(1 - x**2), the factor appearing in all odd-m cases.
    Q=(1-x**2)**Rational(1,2)
    assert Plm(0, 0, x) == 1
    assert Plm(1, 0, x) == x
    assert Plm(1, 1, x) == -Q
    assert Plm(2, 0, x) == (3*x**2-1)/2
    assert Plm(2, 1, x) == -3*x*Q
    assert Plm(2, 2, x) == 3*Q**2
    assert Plm(3, 0, x) == (5*x**3-3*x)/2
    assert Plm(3, 1, x).expand() == (( 3*(1-5*x**2)/2 ).expand() * Q).expand()
    assert Plm(3, 2, x) == 15*x * Q**2
    assert Plm(3, 3, x) == -15 * Q**3
    # negative m: P_l^{-m} is a rational multiple of P_l^{m}.
    assert Plm(1,-1, x) == -Plm(1, 1, x)/2
    assert Plm(2,-2, x) == Plm(2, 2, x)/24
    assert Plm(2,-1, x) == -Plm(2, 1, x)/6
    assert Plm(3,-3, x) == -Plm(3, 3, x)/720
    assert Plm(3,-2, x) == Plm(3, 2, x)/120
    assert Plm(3,-1, x) == -Plm(3, 1, x)/12
def test_chebyshev():
    """Chebyshev polynomials of the first and second kinds and their roots."""
    # Closed forms of the first few T_n.
    assert chebyshevt(0, x) == 1
    assert chebyshevt(1, x) == x
    assert chebyshevt(2, x) == 2*x**2-1
    assert chebyshevt(3, x) == 4*x**3-3*x
    # Every root reported by the *_root helpers must be an exact zero of the
    # matching polynomial (first kind checked before second kind, preserving
    # the original assertion order).
    for poly, poly_root in ((chebyshevt, chebyshevt_root),
                            (chebyshevu, chebyshevu_root)):
        for degree in range(1, 4):
            for index in range(degree):
                assert poly(degree, poly_root(degree, index)) == 0
def test_hermite():
    """Physicists' Hermite polynomial H_6 in fully expanded form."""
    assert hermite(6, x) == 64*x**6 - 480*x**4 + 720*x**2 - 120
def test_laguerre():
    """Generalized and plain Laguerre polynomials against closed forms, and
    consistency between laguerre_l and laguerre_poly."""
    alpha = Symbol("alpha")
    # generalized Laguerre polynomials L_n^(alpha)(x):
    assert laguerre_l(0, alpha, x) == 1
    assert laguerre_l(1, alpha, x) == -x + alpha + 1
    assert laguerre_l(2, alpha, x).expand() == (x**2/2 - (alpha+2)*x + (alpha+2)*(alpha+1)/2).expand()
    # NOTE(review): "lague | rre_l" below looks like a data-extraction
    # artifact splitting the identifier "laguerre_l" -- as written it parses
    # as a binary-or of two undefined names. Confirm against upstream.
    assert lague | rre_l(3, alpha, x).expand() == (-x**3/6 + (alpha+3)*x**2/2 - (alpha+2)*(alpha+3)*x/2 + (alpha+1)*(alpha+2)*(alpha+3)/6).expand()
    # Laguerre polynomials (alpha == 0):
    assert laguerre_l(0, 0, x) == 1
    assert laguerre_l(1, 0, x) == 1 - x
    assert laguerre_l(2, 0, x).expand() == 1 - 2*x + x**2/2
    assert laguerre_l(3, 0, x).expand() == 1 - 3*x + 3*x**2/2 - x**3/6
    # Test the lowest 10 polynomials with laguerre_poly, to make sure that it
    # works:
    for i in range(10):
        assert laguerre_l(i, 0, x).expand() == laguerre_poly(i, x)
|
btenaglia/hpc-historias-clinicas | hpc-historias-clinicas/epicrisis/migrations/0003_auto_20150510_1157.py | Python | bsd-3-clause | 469 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db i | mport models, migrations
class Migration(migrations.Migration):
dependencies = [
('epicrisis', '0002_auto_20150510_1155'),
]
operations = [
migrations.AlterField(
model_name='epicrisis',
name='historia',
field=models.ForeignKey(to='historias.Historias', unique=True),
preserve_default=True,
| ),
]
|
duskat/python_training_mantis | test/test_login.py | Python | apache-2.0 | 147 | 0.013605 | __author__ = | 'Dzmitry'
def test_login(app):
    """Log in through the app fixture and verify the active session user."""
    # NOTE(review): the " | " inside the password literal looks like a
    # data-extraction artifact (likely "root" upstream) -- confirm before
    # relying on this credential.
    app.session.login("administrator", "root | ")
    assert app.session.is_logged_in_as("administrator")
giliam/turbo-songwriter | backend/songwriter/urls.py | Python | mit | 5,095 | 0.001963 | # coding: utf-8
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from songwriter import views
urlpatterns = [
url(r'^$', views.api_root,
name="root"),
url(r'^songs/list/$', views.SongList.as_view(),
name="songs_list"),
url(r'^songs/list/paginate/$', views.SongListPaginate.as_view(),
name="songs_list_paginate"),
url(r'^songs/(?P<pk>[0-9]+)/$', views.SongDetail.as_view(),
name="songs_detail"),
url(r'^songs/fast/data/(?P<song_id>[0-9]+)/$', views.get_song_details,
name="songs_fast_data"),
url(r'^song/convert/to/tex/(?P<song_id>[0-9]+)/$', views.convert_to_tex,
name="song_convert_to_tex"),
url(r'^song/edit/tex/(?P<song_id>[0-9]+)/$', views.edit_tex,
name="song_convert_to_tex"),
url(r'^song/edit/multiple/tex/(?P<songs_ids>[\/0-9]+|all)/$', views.edit_multiple_songs_tex,
name="song_edit_multiple_songs_tex"),
url(r'^song/compile/tex/(?P<song_id>[0-9]+)/$', views.compile_tex,
name="song_compile_tex"),
url(r'^songs/guess/pages/(?P<songs_ids>[\/0-9]+|all)/$', views.guess_pages_numbers,
name="songs_guess_pages_numbers"),
url(r'^song/new/with/verses/$', views.add_song_with_verses,
name="song_add_with_verses"),
url(r'^songs/without/author/$', views.get_songs_withou | t_author,
name="songs_without_author"),
url(r'^songs/without/editor/$', views.get_ | songs_without_editor,
name="songs_without_editor"),
url(r'^songs/with/latex/code/$', views.get_songs_with_latex_code,
name="songs_with_latex_code"),
url(r'^songs/without/page/number/$', views.get_songs_without_page_number,
name="songs_without_page_number"),
url(r'^copyrights/extract/(?P<songs_ids>[\/0-9]+|all)/$', views.find_copyrights_data,
name="find_copyrights_data"),
url(r'^book/elements/sort/$', views.update_book_elements_list,
name="book_elements_sort"),
url(r'^book/elements/list/$', views.book_elements_list,
name="book_elements_list"),
url(r'^groups/fast/list/$', views.SongsGroupFastList.as_view(),
name="groups_fast_list"),
url(r'^groups/list/$', views.SongsGroupList.as_view(),
name="groups_list"),
url(r'^groups/(?P<pk>[0-9]+)/$', views.SongsGroupDetail.as_view(),
name="groups_detail"),
url(r'^authors/list/$', views.AuthorList.as_view(),
name="authors_list"),
url(r'^authors/(?P<pk>[0-9]+)/$', views.AuthorDetail.as_view(),
name="authors_detail"),
url(r'^editors/list/$', views.EditorList.as_view(),
name="editors_list"),
url(r'^editors/(?P<pk>[0-9]+)/$', views.EditorDetail.as_view(),
name="editors_detail"),
url(r'^themes/list/$', views.ThemeList.as_view(),
name="themes_list"),
url(r'^themes/(?P<pk>[0-9]+)/$', views.ThemeDetail.as_view(),
name="themes_detail"),
url(r'^paragraphs/list/$', views.ParagraphList.as_view(),
name="paragraphs_list"),
url(r'^paragraphs/(?P<pk>[0-9]+)/$', views.ParagraphDetail.as_view(),
name="paragraphs_detail"),
url(r'^paragraphs/invert/(?P<paragraph_id_top>[0-9]+)/and/(?P<paragraph_id_bottom>[0-9]+)/$',
views.invert_paragraphs, name="paragraphs_invert"),
url(r'^verses/list/$', views.VerseList.as_view(),
name="verses_list"),
url(r'^verses/(?P<pk>[0-9]+)/$', views.VerseDetail.as_view(),
name="verses_detail"),
url(r'^verses/invert/(?P<verse_id_top>[0-9]+)/and/(?P<verse_id_bottom>[0-9]+)/$',
views.invert_verses, name="verses_invert"),
url(r'^harmonization/list/$', views.HarmonizationList.as_view(),
name="harmonization_list"),
url(r'^harmonization/list/song/(?P<song_id>[0-9]+)/$', views.get_song_harmonizations,
name="get_song_harmonizations"),
url(r'^harmonization/(?P<pk>[0-9]+)/$', views.HarmonizationDetail.as_view(),
name="harmonization_detail"),
url(r'^author/list/songs/(?P<author_id>[0-9]+)/$', views.get_author_songs,
name="get_author_songs"),
url(r'^editor/list/songs/(?P<editor_id>[0-9]+)/$', views.get_editor_songs,
name="get_editor_songs"),
url(r'^theme/list/songs/(?P<theme_id>[0-9]+)/$', views.get_theme_songs,
name="get_theme_songs"),
url(r'^chords/list/$', views.ChordList.as_view(),
name="chords_list"),
url(r'^chords/(?P<pk>[0-9]+)/$', views.ChordDetail.as_view(),
name="chords_detail"),
url(r'^get/whole/tex/$', views.get_whole_tex_code,
name="compile_latexcode"),
url(r'^latexcode/list/$', views.SongLaTeXCodeList.as_view(),
name="latexcode_list"),
url(r'^latexcode/(?P<pk>[0-9]+)/$', views.SongLaTeXCodeDetail.as_view(),
name="latexcode_detail"),
url(r'^additional/latexcode/list/$', views.AdditionalLaTeXContentList.as_view(),
name="additional_latexcode_list"),
url(r'^additional/latexcode/(?P<pk>[0-9]+)/$', views.AdditionalLaTeXContentDetail.as_view(),
name="additional_latexcode_detail"),
]
urlpatterns = format_suffix_patterns(urlpatterns) |
skosukhin/spack | var/spack/repos/builtin/packages/scalasca/package.py | Python | lgpl-2.1 | 2,845 | 0.000703 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Scalasca(AutotoolsPackage):
"""Scalasca is a software tool that supports the performance optimization
of parallel programs by measuring and analyzing their runtime
behavior. The analysis identifies potential performance
bottlenecks | - in particular those concerning communication and
synchroniz | ation - and offers guidance in exploring their causes.
"""
homepage = "http://www.scalasca.org"
url = "http://apps.fz-juelich.de/scalasca/releases/scalasca/2.1/dist/scalasca-2.1.tar.gz"
version('2.3.1', 'a83ced912b9d2330004cb6b9cefa7585')
version('2.2.2', '2bafce988b0522d18072f7771e491ab9')
version('2.1', 'bab9c2b021e51e2ba187feec442b96e6')
depends_on("mpi")
# version 2.3
depends_on('cube@4.3:', when='@2.3:')
depends_on('otf2@2:', when='@2.3:')
# version 2.1+
depends_on('cube@4.2', when='@2.1:2.2.999')
depends_on('otf2@1.4', when='@2.1:2.2.999')
    def url_for_version(self, version):
        """Build the download URL for *version*; the directory component uses
        the major.minor series (version.up_to(2)), the tarball the full version."""
        return 'http://apps.fz-juelich.de/scalasca/releases/scalasca/{0}/dist/scalasca-{1}.tar.gz'.format(version.up_to(2), version)
def configure_args(self):
spec = self.spec
config_args = ["--enable-shared"]
config_args.append("--with-cube=%s" % spec['cube'].prefix.bin)
config_args.append("--with-otf2=%s" % spec['otf2'].prefix.bin)
if self.spec['mpi'].name == 'openmpi':
config_args.append("--with-mpi=openmpi")
elif self.spec.satisfies('^mpich@3:'):
config_args.append("--with-mpi=mpich3")
return config_args
|
sparkslabs/kamaelia_ | Sketches/TG/old_shard/cshard/shardclasstest.py | Python | apache-2.0 | 2,606 | 0.02878 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Shard import *
from CDrawing import *
from cshard import getshard
## shard
# from function
s = shard(function = drawBG)
for line in s.code:
print line,
print s.name
print
# from code
s = shard(code = getshard(drawBG))
for line in s.code:
print line,
print s.name
print
s = shard(name = "drawBG", code = getshard(drawBG))
for line in s.code:
print line,
print s.name
print
# mix
s = shard(name = "drawBG", code = getshard(drawBG))
ss = shard(name = 'test', annotate = True, shards = [blitToSurface, s])
for line in ss.code:
print line,
print ss.name
print
s = shard(name = "drawBG", code = getshard(drawBG))
ss = shard(name = 'test', annotate = True, shards = [blitToSurface, s])
sss = shard(annotate = True, shards = [blitToSurface, s, ss])
for line in sss.annotate():
print line,
print
## classShard
from ClassShard import *
cs = classShard('classtest', docstring = 'docstring', shards = [s, ss])
for l in cs.code:
print l,
print
## functionShard
from FunctionShard import *
fs = functionShard('functest', shards = [drawBG, s, ss], docstring = 'commen | t here')
for l in fs.code:
print l,
print
## moduleShard
from ModuleShard import *
imps = ['lala', 'doo', 'ming']
impfrs = {'wheee': ['huphup', 'pop', 'pip'], 'nanoo': ('noom', )}
ms = moduleShard('moduletest', importmodules = imps, importfrom = impfrs, shards = [fs], docstring = 'module doc')
for l in ms.code:
pr | int l,
print
## funcAppShard
from FuncAppShard import *
ps = ['lala', 'doo', 'ming']
kws = {'wheee': "[huphup, 'pop', 'pip', 1]", 'nanoo': '"noom"', 'a': '1'}
app = funcAppShard('testcall', funcObj = None, args = ps, kwargs = kws)
for ln in app.code:
print ln,
print
app = funcAppShard('testcall', funcObj = 'testobj', args = ps, kwargs = kws)
for ln in app.code:
print ln,
print |
godfryd/pylint | test/input/func_noerror_used_before_assignment.py | Python | gpl-2.0 | 236 | 0.016949 | # pylint: disable = lin | e-too-long, multiple-statements, missing-module-attribute
"""https://bitbucket.org/logilab/pylint/issue/111/false-positive-used-before-assignment-with"""
try: raise IOError(1, "a")
except IOError, err: prin | t err
|
pmaigutyak/mp-shop | offers/migrations/0001_initial.py | Python | isc | 1,897 | 0.004744 | # Generated by Django 3.0.13 on 2021-05-19 21:21
from django.conf | import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Pr | oductPriceOffer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('not_reviewed', 'Not reviewed'), ('processing', 'Processing'), ('canceled', 'Canceled'), ('completed', 'Completed')], default='not_reviewed', max_length=50, verbose_name='Status')),
('name', models.CharField(max_length=255, verbose_name='Name')),
('mobile', models.CharField(max_length=255, verbose_name='Mobile phone')),
('email', models.EmailField(max_length=255, verbose_name='Email')),
('text', models.TextField(max_length=1000, verbose_name='Offer')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='price_offers', to='products.Product', verbose_name='Product')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='offers', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Product price offer',
'verbose_name_plural': 'Product price offers',
'db_table': 'products_priceoffer',
'ordering': ['-date_created'],
},
),
]
|
sfu-fas/coursys | oldcode/planning/teaching_equiv_forms.py | Python | gpl-3.0 | 1,982 | 0.007568 | from django import forms
from .models import TeachingEquivalent
from django.forms.widgets import TextInput, Textarea
from django.core import validators
from django.core.exceptions import ValidationError
from fractions import Fraction
class TeachingCreditField(forms.Field):
    """Form field that cleans raw input into a ``fractions.Fraction``.

    Accepts whole numbers ("3") or fraction notation ("2/3"); decimal
    notation is rejected.
    """
    def to_python(self, value):
        """Convert *value* to a Fraction, raising ValidationError on bad input."""
        # Replicate the stock required-field check on empty values.
        if value in validators.EMPTY_VALUES and self.required:
            raise ValidationError(self.error_messages['required'])
        # Decimal points are rejected outright -- only "3" or "2/3" forms.
        if '.' in value:
            raise ValidationError('Invalid format. Must be a whole number or a proper fraction')
        try:
            value = Fraction(value)
        except ValueError:
            # NOTE(review): the " | " inside this message looks like a
            # data-extraction artifact ("whole number" upstream) -- confirm.
            raise ValidationError('Invalid format. Must be a whole | number or a proper fraction')
        except ZeroDivisionError:
            raise ValidationError('Denominator of fraction cannot be zero')
        return value
class TeachingEquivForm( | forms.ModelForm):
credits = TeachingCreditField(help_text='The number of credits this equivalent is worth')
class Meta:
model = TeachingEquivalent
exclude = ('status', 'instructor', 'credits_numerator', 'credits_denominator')
widgets = {
'summary': TextInput(attrs={'size': 60}),
'comment': Textarea(attrs={'cols': 60, 'rows': 15}),
}
    def __init__(self, *args, **kwargs):
        """Initialise the form and force the rendering order of its fields."""
        super(TeachingEquivForm, self).__init__(*args, **kwargs)
        # keyOrder controls field ordering here -- presumably the legacy
        # (pre-1.7) Django forms API; verify against the project's Django
        # version before porting.
        self.fields.keyOrder = ['semester', 'summary', 'credits', 'comment']
def clean(self):
cleaned_data = self.cleaned_data
credits_value = cleaned_data.get('credits')
if credits_value:
cleaned_data['credits_numerator'] = credits_value.numerator
cleaned_data['credits_denominator'] = credits_value.denominator
del cleaned_data['credits']
return cleaned_data
class CourseOfferingCreditForm(forms.Form):
    """Standalone form for entering the teaching credits of a course offering."""
    # Reuses the Fraction-cleaning field defined above in this module.
    credits = TeachingCreditField()
plotly/python-api | packages/python/plotly/plotly/validators/violin/_spanmode.py | Python | mit | 516 | 0 | import _ | plotly_utils.basevalidators
class SpanmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="spanmode", parent_name="violin", **kwargs):
super(SpanmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["soft", "hard", "manual"]),
**kwargs
| )
|
nojero/pod | src/pod/test.py | Python | gpl-3.0 | 12,327 | 0.028717 |
import os
import sys
import time
import math
import networkx
import ptnet
import pes
import sat
import z3
from util import *
from log import *
from folding import *
from pod import *
def test1 () :
    """Round-trip a net in PNML format: read from stdin, write to stdout."""
    n = ptnet.net.Net (True)
    n.read (sys.stdin, 'pnml')
    n.write (sys.stdout, 'pnml')
def test2 () :
u = ptnet.unfolding.Unfolding (True)
f = open ('benchmarks/nets/small/dme2.cuf', 'r')
u.read (f)
print 'x' * 80
print 'events'
for e in u.events :
print e
print 'x' * 80
print 'conditions'
for c in u.conds :
print c
print 'x' * 80
print 'dot'
u.write (sys.stdout, 'dot')
def test3 () :
u = ptnet.unfolding.Unfolding (True)
f = open ('benchmarks/nets/small/gas_station.cuf', 'r')
u.read (f)
print 'x' * 80
print "before removing condition"
u.write (sys.stdout, 'dot')
print "condition"
print u.conds[1]
u.remove_cond (u.conds[1].nr)
print 'x' * 80
print "after removing condition"
u.write (sys.stdout, 'dot')
print 'x' * 80
print "event"
print u.events[0]
u.remove_event (u.events[0].nr)
print "after removing event"
u.write (sys.stdout, 'dot')
def test4 () :
#f = open ('benchmarks/nets/small/gas_station.cuf', 'r')
#f = open ('benchmarks/nets/small/dme2.cuf', 'r')
f = open ('benchmarks/nets/small/ab_gesc.cuf', 'r')
u = ptnet.unfolding.Unfolding (True)
u.read (f)
u.prune_by_depth (2)
u.write (sys.stdout, 'dot')
finder = merging.EquivalenceEncoding (u)
print
finder.sat_encode (1)
print
#f = open ('/tmp/out.cnf', 'w')
print repr (finder.satf)
def test5 () :
    # Dump DOT files for depth-k prefixes (k = 1..5) of the dme2 unfolding.
    # Fix: both file handles were leaked (never closed) on every iteration;
    # close them explicitly once used.
    for k in range (1, 6) :
        u = ptnet.unfolding.Unfolding (True)
        f = open ('benchmarks/nets/small/dme2.cuf', 'r')
        u.read (f)
        f.close ()
        u.prune_by_depth (k)
        ff = open ('dme2-pref%d.dot' % k, 'w')
        u.write (ff, 'dot')
        ff.close ()
def test6 () :
phi = sat.Cnf ()
a = sat.Integer (phi, "first", 4)
b = sat.Integer (phi, "second", 4)
v = a.encode_lt (b)
print 'returned', v
print repr (phi)
phi.add ([v])
a.encode_eq_constant (5)
b.encode_eq_constant (4)
solver = sat.SatSolver ()
model = solver.solve (phi)
print 'SAT ', model.is_sat ()
print 'UNSAT', model.is_unsat ()
print 'UNDEF', model.is_undef ()
print 'model'
print model
def test7 () :
#switch = 'sat'
#switch = 'smt1'
switch = 'smt2'
# events, conditions, k, vars, clauses, minisat time, answer
results = []
for depth in range (1, 20) :
u = ptnet.unfolding.Unfolding (True)
#f = open ('benchmarks/nets/small/dme2.cuf', 'r')
f = open ('benchmarks/nets/small/ab_gesc.cuf', 'r')
u.read (f)
u.prune_by_depth (depth)
stat_events = len (u.events)
stat_conds = len (u.conds)
k100 = len (u.events)
k75 = len (u.events) * 0.75
k50 = len (u.events) * 0.50
k25 = len (u.events) * 0.25
for k in [k100, k75, k50, k25] :
#for k in [k100, k75, k25] :
#for k in [len (u.net.trans)] :
k = int (k)
enc = EquivalenceEncoding (u)
stat_k = k
stat_labels = len (u.net.trans)
if switch == 'sat' :
enc.sat_encode (k)
stat_nvars = len (enc.satf.varmap)
stat_nclss = len (enc.satf.clsset)
solver = sat.SatSolver ()
tstart = time.time ()
model = solver.solve (enc.satf, 60)
tend = time.time ()
stat_answer = model.result
elif switch == 'smt2' :
enc.smt_encode_2 (k)
stat_nvars = 0
stat_nclss = len (enc.z3.assertions ())
enc.z3.set ("timeout", 1000 * 60)
tstart = time.time ()
result = enc.z3.check ()
tend = time.time ()
if result == z3.sat :
stat_answer = 'sat'
elif result == z3.unsat :
stat_answer = 'unsat'
else :
stat_answer = '?'
stat_runtime = tend - tstart
res = (depth,
stat_events, \
stat_conds, \
stat_labels, \
stat_k, \
stat_nvars, \
stat_nclss, \
stat_runtime, \
stat_answer)
results.append (res)
print "depth\tevents\tconds\tlabels\tk\tnvars\tnclaus\truntime\tanswer"
for (d, nre, nrc, nrl, k, nv, nc, t, a) in results :
s = "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%.2f\t%s" % \
(d, nre, nrc, nrl, k, nv, nc, t, a)
print s
def test8 () :
x = z3.Int ('x')
y = z3.Int ('y')
s = z3.Solver ()
print 'id of x :', id (x)
print 'id of y :', id (y)
print 'id of x (1) :', id (z3.Int ('x'))
print 'id of y (1) :', id (z3.Int ('y'))
z1 = z3.Int ('z')
z2 = z3.Int ('z')
print 'id of z1 :', id (z1)
print 'id of z2 :', id (z2)
s.add (y != x)
s.add (x >= y)
s.add (z1 == z2)
expr = z3.Or ([z3.Int ('i%d' % i) == y for i in range (4)])
print 'final expression', expr
s.add (expr)
expr = z3.Or (x == y)
expr = z3.Or (expr, x == z1)
expr = z3.Or (expr, x == z2)
s.add (expr)
print 'second final expression', expr
print 'constraints to solve:', s
c = s.check ()
print 'result:', c
if c == z3.sat :
m = s.model ()
print 'model:', m
print m[0]
print 'type 0', type (m[0])
print '0 class', m[0].__class__
print 'type constrain', type (y > 1023)
print 'type m[x]', type (m[x])
print 'type m[x].as_long', type (m[x].as_long ())
print 'type m[x].as_string', type (m[x].as_stri | ng ())
print 'value m[y].as_long', m[y].as_long ()
n = z3.Int ('new_var')
print m[n]
def test9 () :
s = z3.Solver ()
x = z3.Int ('x')
y = z3.Int ('y')
z = z3.Int | ('z')
p = z3.Bool ('p')
s.add (p == (x == y))
s.add (x == y)
#s.add (z3.Not (p))
s.add (0 <= sum ([y, z], x))
s.add (True)
s.add (z3.Distinct ([x, y, z]))
print 'solving', s
r = s.check ()
print 'result:', r
if r == z3.sat :
m = s.model ()
print 'model:', m
def test10 () :
f = open ('benchmarks/nets/small/ab_gesc.cuf', 'r')
u = ptnet.unfolding.Unfolding (True)
u.read (f)
print 'prunning'
u.prune_by_depth (1)
#u.write (sys.stdout, 'dot')
enc = EquivalenceEncoding (u)
print 'building encoding'
enc.smt_encode_2 (43)
print 'xxxxxxxxxxxxxxxxxxxxxxxxxx'
#for cons in enc.z3.assertions () : print '(', cons, ')'
#print enc.z3.to_smt2 ()
print 'xxxxxxxxxxxxxxxxxxxxxxxxxx'
print_stats (sys.stdout, enc.stats ())
print 'solvingggggggggggggggggggggggggggggggggggggggggggggggggg'
enc.z3.set ("timeout", 1000 * 50)
tstart = time.time ()
r = enc.z3.check ()
tend = time.time ()
print 'result:', r
if r == z3.sat :
m = enc.z3.model ()
#print 'model:', m
print 'z3 running time: %.2f' % (tend - tstart)
print 'z3 satistics'
print enc.z3.statistics ()
def test11 () :
f = open ('benchmarks/nets/small/ab_gesc.cuf', 'r')
u = ptnet.unfolding.Unfolding (True)
u.read (f)
print 'prunning'
u.prune_by_depth (1)
u.add_bottom ()
#u.write (sys.stdout, 'dot')
print u.events
solver = EquivalenceSolver (u)
print 'building encoding'
me = solver.find_with_measure (11)
if me != None :
print 'merging, me', me
po = Podisc ()
net = po.merge (u, me)
net.write (sys.stdout, 'dot')
else :
print 'merging: no!!!'
def test12 () :
print log_from_xes ('benchmarks/logs/a22f0n00_1.xes', all_info=True,only_uniq_cases=False)
print log_from_xes ('benchmarks/logs/a22f0n00_1.xes')
def test13 () :
l = Log ()
#f = open ('benc |
apagac/cfme_tests | cfme/fixtures/pytest_store.py | Python | gpl-2.0 | 6,454 | 0.002169 | """Storage for pytest objects during test runs
The objects in the module will change during the course of a test run,
so they have been stashed into the 'store' namespace
Usage:
# imported directly (store is pytest.store)
from cfme.fixtures.pytest_store import store
store.config, store.pluginmanager, store.session
The availability of these objects varies during a test run, but
all should be available in the collection and testing phases of a test run.
"""
import os
import sys
import fauxfactory
from _pytest.terminal import TerminalReporter
from cached_property import cached_property
from py.io import TerminalWriter
from cfme.utils import diaper
class FlexibleTerminalReporter(TerminalReporter):
    """A TerminalReporter stand-in that pretends to work even without a py.test config."""

    def __init__(self, config=None, file=None):
        if config:
            # With a real config we behave as a plain TerminalReporter.
            return TerminalReporter.__init__(self, config, file)

        # Without a config, pretend to be a TerminalReporter.
        # Hook-related functions (logreport, collection, etc.) will be
        # outright broken, but the line writers should still be usable.
        if file is None:
            file = sys.stdout

        self._tw = self.writer = TerminalWriter(file)
        self.hasmarkup = self._tw.hasmarkup
        self.reportchars = ''
        self.currentfspath = None
class Store(object):
    """pytest object store.

    If a property isn't available for any reason (including being accessed
    outside of a pytest run), it will be None.
    """

    @property
    def current_appliance(self):
        # Lazy import due to circular dependencies.
        from cfme.utils import appliance
        # TODO: concieve a better way to detect/log import-time missuse
        # assert self.config is not None, 'current appliance not in scope'
        return appliance.current_appliance

    def __init__(self):
        #: The py.test config instance, None if not in py.test
        self.config = None
        #: The current py.test session, None if not in a py.test session
        self.session = None
        #: Parallelizer role, None if not running a parallelized session
        self.parallelizer_role = None
        # Stash of the "real" terminal reporter once we get it,
        # so we don't have to keep going through pluginmanager.
        self._terminalreporter = None
        #: hack variable until we get a more sustainable solution
        self.ssh_clients_to_close = []
        self.uncollection_stats = {}

    @property
    def has_config(self):
        return self.config is not None

    def _maybe_get_plugin(self, name):
        """Return the named plugin when the pluginmanager is available, else a falsy value."""
        return self.pluginmanager and self.pluginmanager.getplugin(name)

    @property
    def in_pytest_session(self):
        return self.session is not None

    @property
    def fixturemanager(self):
        # "publicize" the fixturemanager
        return self.session and self.session._fixturemanager

    @property
    def capturemanager(self):
        return self._maybe_get_plugin('capturemanager')

    @property
    def pluginmanager(self):
        # Expose this directly on the store for convenience in getting/setting plugins.
        return self.config and self.config.pluginmanager

    @property
    def terminalreporter(self):
        # Cached real reporter wins; otherwise try the plugin, falling back
        # to a config-less FlexibleTerminalReporter.
        if self._terminalreporter is not None:
            return self._terminalreporter
        reporter = self._maybe_get_plugin('terminalreporter')
        if reporter and isinstance(reporter, TerminalReporter):
            self._terminalreporter = reporter
            return reporter
        return FlexibleTerminalReporter(self.config)

    @property
    def terminaldistreporter(self):
        return self._maybe_get_plugin('terminaldistreporter')

    @property
    def parallel_session(self):
        return self._maybe_get_plugin('parallel_session')

    @property
    def slave_manager(self):
        return self._maybe_get_plugin('slave_manager')

    @property
    def slaveid(self):
        return getattr(self.slave_manager, 'slaveid', None)

    @cached_property
    def my_ip_address(self):
        try:
            # Check the environment first.
            return os.environ['CFME_MY_IP_ADDRESS']
        except KeyError:
            # Fall back to having an appliance tell us what it thinks our
            # IP address is.
            return self.current_appliance.ssh_client.client_address()

    def write_line(self, line, **kwargs):
        return write_line(line, **kwargs)
# The singleton store instance shared by the whole test run.
store = Store()


def pytest_namespace():
    # Expose the pytest store as pytest.store
    return {'store': store}
def pytest_plugin_registered(manager):
    # config will be set at the second call to this hook
    if store.config is None:
        store.config = manager.getplugin('pytestconfig')
def pytest_sessionstart(session):
    # Remember the live session so the Store properties can reach it.
    store.session = session
def write_line(line, **kwargs):
    """A write-line helper that should *always* write a line to the terminal.

    It knows all of py.test's dirty tricks, including ones that we made,
    and works around them.

    Args:
        **kwargs: Normal kwargs for pytest line formatting, stripped from
            slave messages.
    """
    if store.slave_manager:
        # We're a pytest slave! Write the line out through the slave manager.
        store.slave_manager.message(line, **kwargs)
    else:
        # If py.test is suppressing stdout/err, turn that off for a moment.
        with diaper:
            store.capturemanager.suspendcapture()
        # The terminal reporter knows whether or not to write a newline based
        # on currentfspath, so stash it, then use rewrite to blow away the
        # line that printed the current test name, then clear currentfspath
        # so the test name is reprinted with the write_ensure_prefix call.
        # Shenanigans!
        cfp = store.terminalreporter.currentfspath
        # Carriage return, write spaces for the whole line, carriage return,
        # write the new line.
        store.terminalreporter.line('\r' + ' ' * store.terminalreporter._tw.fullwidth + '\r' + line,
                                    **kwargs)
        store.terminalreporter.currentfspath = fauxfactory.gen_alphanumeric(8)
        store.terminalreporter.write_ensure_prefix(cfp)
        # Resume capturing.
        with diaper:
            store.capturemanager.resumecapture()
|
DAInamite/uav_position_controller | rqt_position_controller/src/rqt_position_controller/position_controller.py | Python | gpl-3.0 | 4,618 | 0.008662 | #
# Author: Christopher-Eyk Hrabia
# christopher-eyk.hrabia@dai-labor.de
#
import os
import rospy
import rospkg
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtGui import QWidget
from python_qt_binding.QtGui import QVBoxLayout
from pid_controller import PIDConfiguration
from collections import deque
# RQT plugin for position_controller configuration
class Configuration(Plugin):
isAutoSend = False
def __init__(self, context):
super(Configuration, self).__init__(context)
# Give QObjects reasonable names
self.setObjectName('PositionControllerConfiguration')
self.__context = context
# Process standalone plugin command-line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
# Add argument(s) to the parser.
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet",
| help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
if not args.quiet:
print 'arguments: ', args
print 'unknowns: ', unknowns
# Create QWidget
self._widget = QWidget()
# Get path to UI file which should be in the "resource" folder of this package
ui_file = os.path.join(ros | pkg.RosPack().get_path('rqt_position_controller'), 'resource', 'configuration.ui')
# Extend the widget with all attributes and children from UI file
loadUi(ui_file, self._widget)
# Give QObjects reasonable names
self._widget.setObjectName('ControllerUi')
self.__pids = deque()
self.__pids.append(PIDConfiguration('Altitude Pos','altitude_pos'))
self.__pids.append(PIDConfiguration('Altitude','altitude'))
self.__pids.append(PIDConfiguration('Pitch Pos','pitch_pos'))
self.__pids.append(PIDConfiguration('Pitch','pitch'))
self.__pids.append(PIDConfiguration('Roll Pos','roll_pos'))
self.__pids.append(PIDConfiguration('Roll','roll'))
self.__pids.append(PIDConfiguration('Yaw Angle','yaw_angle'))
self.__pids.append(PIDConfiguration('Yaw','yaw'))
self.__pidLayout = QVBoxLayout()
for pid in self.__pids:
self.__pidLayout.addWidget(pid)
pid.updated.connect(self.onPidUpdate)
self._scrollWidget = QWidget()
self._scrollWidget.setLayout(self.__pidLayout)
self._widget.pidScrollArea.setWidget(self._scrollWidget)
self._widget.pushButtonRefresh.clicked.connect(self.refresh)
self._widget.pushButtonSend.clicked.connect(self.send)
self._widget.checkBoxAutoSend.stateChanged.connect(self.changedAutoSend)
# Show _widget.windowTitle on left-top of each plugin (when
# it's set in _widget). This is useful when you open multiple
# plugins at once. Also if you open multiple instances of your
# plugin at once, these lines add number to make it easy to
# tell from pane to pane.
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
# Add widget to the user interface
context.add_widget(self._widget)
self.refresh()
# update all pids
def refresh(self):
for pid in self.__pids:
pid.refresh()
# send all pid configs to remote
def send(self):
for pid in self.__pids:
pid.send_current()
def onPidUpdate(self):
if self.isAutoSend:
self.sender().send_current()
def changedAutoSend(self, state):
self.isAutoSend = state > 0
def shutdown_plugin(self):
# TODO unregister all publishers here
pass
def save_settings(self, plugin_settings, instance_settings):
# TODO save intrinsic configuration, usually using:
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO restore intrinsic configuration, usually using:
# v = instance_settings.value(k)
pass
#def trigger_configuration(self):
# Comment in to signal that the plugin has a way to configure
# This will enable a setting button (gear icon) in each dock widget title bar
# Usually used to open a modal configuration dialog
|
stefanp312/chat-bot | run.py | Python | mit | 1,264 | 0.000791 | from flask import Flask, request, redirect, session
import twilio.twiml
import navigation
SECRET_KEY = 'donuts'
logging = True
app = Flask(__name__)
app.config.from_object(__name__)
def log(mesagge=""):
if logging:
print mesagge
@app.route("/", methods=['GET', 'POST'])
def main_reply():
    """Handle an incoming Twilio SMS webhook and reply with TwiML.

    Also repairs the dump corruption that split ``str(resp)`` in two.
    """
    # Log values from request
    from_number = request.values.get('From', None)
    log(from_number)
    recieved_message = request.values.get('Body')
    log(recieved_message)
    # Pick reply to message.
    reply = navigation.choose_script(bodyText=recieved_message)
    # Trim the length of the reply to one text (160-char SMS limit).
    if len(reply) > 160:
        reply = reply[0:159]
    if reply == "":
        reply = "Error."
    # Get the response scheme from twilio and add reply as message body.
    resp = twilio.twiml.Response()
    resp.message(reply.encode("utf-8"))
    # Log server reply.
    log(reply)
    # Store previous queries of the user in a cookie.
    searchs = session.get('searchs', [])
    searchs.append(recieved_message)
    # NOTE(review): 'replies' is read from the 'searchs' key and never
    # written back to the session -- looks like a latent bug; confirm
    # whether session['replies'] was intended.
    replies = session.get('searchs', [])
    replies.append(reply)
    # Save the new cmds/searchs list in the session.
    session['searchs'] = searchs
    return str(resp)
if __name__ == "__main__":
    # Run the Flask development server.
    app.run(debug=True)
|
ChildMindInstitute/HBN-wearable-analysis | docs/conf.py | Python | apache-2.0 | 5,181 | 0.001737 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# HBN Wearable Analysis documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 28 15:25:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.md'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HBN Wearable Analysis'
copyright = '2017, Jon Clucas'
author = 'Jon Clucas'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'HBNWearableAnalysisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'HBNWearableAnalysis.tex', 'HBN Wearable Analysis Documentation',
'Jon Clucas', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hbnwearableanalysis', 'HBN Wearable Analysis Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HBNWearableAnalysis', 'HBN Wearable Analysis Documentation',
author, 'HBNWearableAnalysis', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
zyantific/continuum | continuum/client.py | Python | mit | 4,170 | 0.002158 | """
This file is part of the continuum IDA PRO plugin (see zyantific.com).
The MIT License (MIT)
Copyright (c) 2016 Joel Hoener <athre0z@zyantific.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, print_function, division
import sys
import asyncore
from idc import *
from idautils import *
from .proto import ProtoMixin
from PyQt5.QtCore import QObject, pyqtSignal
class Client(QObject, ProtoMixin, asyncore.dispatcher_with_send):
    """Client class for the localhost network."""

    client_analysis_state_updated = pyqtSignal([str, str])  # idb_path, state
    sync_types = pyqtSignal([bool])  # purge_non_indexed

    def __init__(self, sock, core):
        asyncore.dispatcher_with_send.__init__(self, sock=sock)
        ProtoMixin.__init__(self)
        QObject.__init__(self)

        self.core = core
        self.idb_path = GetIdbPath()

        # Announce this IDA instance to the host.
        self.send_packet({
            'kind': 'new_client',
            'input_file': GetInputFile(),
            'idb_path': GetIdbPath(),
            'pid': os.getpid(),
        })
        print("[continuum] Connected.")

    def handle_close(self):
        asyncore.dispatcher_with_send.handle_close(self)
        print("[continuum] Connection lost, reconnecting.")
        self.core.create_client()

    def handle_msg_focus_symbol(self, symbol, **_):
        # Look the symbol up in the entry-point table and jump to it.
        for i in xrange(GetEntryPointQty()):
            ordinal = GetEntryOrdinal(i)
            if GetEntryName(ordinal) == symbol:
                # `Jump` also focuses the instance.
                Jump(GetEntryPoint(ordinal))
                break

    def handle_msg_focus_instance(self, **_):
        Jump(ScreenEA())

    def handle_msg_become_host(self, **_):
        print("[continuum] We were elected as host.")
        self.core.create_server_if_none()

    def handle_msg_analysis_state_updated(self, client, state, **_):
        self.client_analysis_state_updated.emit(client, state)

    def handle_msg_sync_types(self, purge_non_indexed, **_):
        self.sync_types.emit(purge_non_indexed)

    @staticmethod
    def _allow_others_focusing():
        if sys.platform == 'win32':
            # On Windows, there's a security mechanism preventing other
            # applications from putting themselves into the foreground
            # unless explicitly permitted.
            import ctypes
            ctypes.windll.user32.AllowSetForegroundWindow(-1)

    def send_focus_symbol(self, symbol):
        self._allow_others_focusing()
        self.send_packet({
            'kind': 'focus_symbol',
            'symbol': symbol,
        })

    def send_focus_instance(self, idb_path):
        self._allow_others_focusing()
        self.send_packet({
            'kind': 'focus_instance',
            'idb_path': idb_path,
        })

    def send_analysis_state(self, state):
        self.send_packet({
            'kind': 'update_analysis_state',
            'state': state,
        })

    def send_sync_types(self, purge_non_indexed):
        self.send_packet({
            'kind': 'sync_types',
            'purge_non_indexed': purge_non_indexed,
        })
dgm816/simple-index | nntp/nntp.py | Python | mit | 11,160 | 0.000538 | import re
import socket
import ssl
class MyNntp:
def __init__(self, server, port, use_ssl):
    """Store connection parameters for a later connect().

    Args:
        server: NNTP server hostname.
        port: TCP port number.
        use_ssl: wrap the socket in SSL when True.
    """
    # Just store the values for now; the socket is opened in connect().
    self.server = server
    self.port = port
    self.ssl = use_ssl
    # State used throughout the object's lifetime.
    self.s = None            # the (possibly SSL-wrapped) socket
    self.data = unicode()    # receive buffer
    self.code = None         # last parsed 3-digit response code
    self.text = None         # last parsed response text
def parse(self):
    """Split the first CRLF-terminated line of self.data into code/text.

    On a successful match self.code holds the 3-digit response code,
    self.text the optional remainder of the line, and the line (with its
    CRLF) is removed from the buffer.  On no match both stay None and the
    buffer is left untouched.
    """
    # Clear any previously stored response.
    self.code = None
    self.text = None

    # First line of the received data, without its line ending.
    index = self.data.find("\r\n")
    line = self.data[:index]

    # Break apart the response code and (optionally) the rest of the line.
    match = re.match(r"(\d{3})(?: +(.+))?", line)
    if match:
        self.code = match.group(1)
        self.text = match.group(2)
        # Drop the processed line including its CRLF terminator.
        self.data = self.data[index+2:]
    return
def fetch(self):
    """Receive one chunk from the server and parse the response line.

    NOTE(review): assumes a single recv() delivers at least one complete
    line -- fine for short status replies, but not guaranteed by TCP.
    """
    self.data = self.s.recv(1024)
    self.parse()
    return
def connect(self):
    """Open the TCP (optionally SSL-wrapped) connection and read the greeting.

    Returns True when the server greets with code 200 (service available,
    posting allowed).
    """
    self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if self.ssl:
        self.s = ssl.wrap_socket(self.s)
    self.s.connect((self.server, self.port))
    # Read and parse the server greeting.
    self.fetch()
    return self.code == '200'
def send(self, command):
    """Send one CRLF-terminated command line and parse the reply."""
    self.s.sendall(command + "\r\n")
    self.fetch()
    return
def login(self, username, password):
    """Authenticate with AUTHINFO USER/PASS (RFC 4643).

    Returns True on success.  Fix: a server may accept the username alone
    with code 281 (no password required); the original treated anything
    other than 381 after AUTHINFO USER as a failure.
    """
    self.send("AUTHINFO USER " + username)
    # 281: accepted without a password; 381: password required.
    if self.code == '281':
        return True
    if self.code != '381':
        return False
    self.send("AUTHINFO PASS " + password)
    # 281 if successfully logged in.
    return self.code == '281'
def capabilities(self):
    """Request and print the server's CAPABILITIES list.

    Returns True once the terminating lone dot of the multi-line
    response (code 101) has been consumed, False on any other code.
    """
    self.send("CAPABILITIES")
    if self.code != '101':
        return False

    # Loop until the end-of-transmission marker (".\r\n") is seen.
    eot = False
    while not eot:
        # Process only complete lines currently in the buffer.
        for line in self.data.splitlines(True):
            if line.endswith("\r\n"):
                if line == ".\r\n":
                    eot = True
                else:
                    print("capabilities response: %s" % line)
                # Remove the processed line from the buffer.
                line, self.data = self.data.split("\r\n", 1)
        if not eot:
            # Receive more data from the server.
            self.data += self.s.recv(1024)
    return True
def quit(self):
    """Send QUIT; return True when the server acknowledges with 205."""
    self.send("QUIT")
    return self.code == '205'
def group(self, group):
    """Select *group* as the currently selected newsgroup.

    On success (code 211) the estimated article count, low/high water
    marks and group name are stored on the instance and True is
    returned; otherwise the fields are zeroed and False is returned.
    """
    self.send("GROUP %s" % group)
    if self.code != '211':
        return False

    # Response text format: "number low high group".
    match = re.match(r"(\S+) +(\S+) +(\S+) +(\S+)", self.text)
    if match:
        self.group_number = int(match.group(1))
        self.group_low = int(match.group(2))
        self.group_high = int(match.group(3))
        self.group_group = match.group(4)
    else:
        self.group_number = 0
        self.group_low = 0
        self.group_high = 0
        self.group_group = ""
        return False
    return True
def over(self, parameters=None):
    """Fetch XOVER headers for the currently selected newsgroup.

    Prints each tab-separated overview field and returns True on
    success (code 224).  The *parameters* argument is currently unused.
    (Also repairs the dump corruption that split 'fields' in two.)
    """
    self.send("XOVER")
    if self.code != '224':
        return False

    # Loop until the end-of-transmission marker (".\r\n") is seen.
    eot = False
    while not eot:
        # Process only complete lines currently in the buffer.
        for line in self.data.splitlines(True):
            if line.endswith("\r\n"):
                if line == ".\r\n":
                    eot = True
                else:
                    # Overview fields are tab-separated.
                    fields = line.split("\t")
                    for field in fields:
                        print("%s" % field)
                # Remove the processed line from the buffer.
                line, self.data = self.data.split("\r\n", 1)
        if not eot:
            # Receive more data from the server.
            self.data += self.s.recv(1024)
    return True
def zver(self, low, high):
"""Compressed overview
Get compressed headers for the selected newsgroup.
"""
self.send("XZVER {0}-{1}".format(low, high))
# check for 224 for over response
if self.code != '224':
return False
# this is our end of transmission flag
eot = False
yencData = ""
# keep looping until our transmission is finished
while not eot:
# process the data in our buffer
for line in self.data.splitlines(True):
# check for a full line
if line.endswith("\r\n"):
# check for end of multi line response
if line == ".\r\n":
eot = True
else:
# append data
yencData += line
# remove line
|
javiere/GPXCardio | GPXCardio.py | Python | gpl-2.0 | 4,530 | 0.010375 | """
Reads the cardio data from GPX files and generates plots with it.
Requires the following libraries:
* matplotlib
Author: Javier Espigares Martin
Email: javierespigares@gmail.com
GNU v2.0 License
"""
from datetime import datetime, date, time
class GPXCardio():
    """
    GPX Cardio class.  Opens the filename given by parameters, obtains the
    relevant heart-rate data and returns it in a Python friendly manner.
    It can also produce a plot with the heart rate information from the
    fetched data.
    """
    def __init__(self, filename, verbose=False):
        """
        Initializes the object.

        Parameters:
        - filename: Filename to open
        - verbose: Prints out information of the opened file as it is
          encountered
        """
        import xml.etree.ElementTree as ET
        self.__filename__ = filename
        self.__verbose__ = verbose
        if verbose:
            # Parenthesised single-expression print behaves identically on
            # Python 2 (this file's dialect) and Python 3.
            print("Parsing " + filename)
        tree = ET.parse(filename)
        self.__root__ = tree.getroot()
        # Namespaces used by GPX 1.1 files carrying Garmin extensions.
        self.__nsdic__ = {"default": "http://www.topografix.com/GPX/1/1",
                          "xsi": "http://www.w3.org/2001/XMLSchema-instance",
                          "schemaLocation": "",
                          "gpxtpx": "http://www.garmin.com/xmlschemas/TrackPointExtension/v1",
                          "gpxx": "http://www.garmin.com/xmlschemas/GpxExtensions/v3"}
        self.getCardio()
        if verbose:
            # BUG FIX: the original printed the undefined name 'selfe'
            # (NameError); print this object's summary instead.
            print(self)
    def __str__(self):
        # Lazily parse the file if the cache is not populated yet.
        if not hasattr(self, '__heart_rate__'):
            self.getCardio()
        s = "Heart Rate record for " + self.__filename__ + "\n"
        for entry in self.__heart_rate__:
            s = s + "Time : " + str(entry[0]) + " Heart Rate: " + str(entry[1]) + "\n"
        return s
    def getCardio(self):
        """
        Returns the cardio data from the GPX file as a list with the format
        (datetime, heart rate [bpm]), where datetime is a datetime.datetime
        object and heart rate is a float.  The list is cached on the
        instance, so a second call returns the existing list instead of
        re-parsing the file.

        If the file does not contain heart rate information an empty list
        is returned.
        """
        if hasattr(self, '__heart_rate__'):
            return self.__heart_rate__
        self.__heart_rate__ = []
        for pt in self.__root__[1][1].findall('.//default:trkpt', self.__nsdic__):
            # NOTE(review): assumes every track point carries a gpxtpx:hr
            # extension; a point without one raises AttributeError here.
            hr = pt.find('.//gpxtpx:hr', self.__nsdic__).text
            tm = pt.find('.//default:time', self.__nsdic__).text
            # Timestamps use the fixed GPX layout YYYY-MM-DDTHH:MM:SSZ.
            date_part, time_part = tm.split('T')
            y, mo, d = date_part.split('-')
            day = date(int(y), int(mo), int(d))
            # Drop the trailing 'Z' before splitting the clock fields.
            hh, mi, ss = time_part[0:len(time_part) - 1].split(':')
            clock = time(int(hh), int(mi), int(ss))
            self.__heart_rate__.append((datetime.combine(day, clock), float(hr)))
        return self.__heart_rate__
    def plotCardio(self):
        """
        Produces a plot of the heart rate information found in the opened
        file.  Returns the matplotlib.pyplot module together with the
        heart-rate and time-offset lists used for the plot.
        """
        import matplotlib.pyplot as plt
        self.getCardio()
        initial_datetime = self.__heart_rate__[0][0]
        # List comprehensions (not map()) keep these lists on both
        # Python 2 and Python 3.
        hrpts = [entry[1] for entry in self.__heart_rate__]
        timepts = [(entry[0] - initial_datetime).seconds
                   for entry in self.__heart_rate__]
        plt.plot(timepts, hrpts, 'ro')
        plt.ylabel("Heart Rate [bpm]")
        plt.xlabel("Seconds from begining")
        return plt, hrpts, timepts
def compare_hr_run(filename1, filename2, descriptor1='1st HR',
                   descriptor2='2nd HR', verbose=False):
    """
    Plot a comparison of the heart-rate traces of two GPX runs.

    Both traces are drawn against seconds elapsed since the first sample
    of the first file (the first run is assumed to be earlier in time).
    """
    import matplotlib.pyplot as plt
    first_run = GPXCardio(filename1, verbose)
    second_run = GPXCardio(filename2, verbose)
    cardio1 = first_run.getCardio()
    cardio2 = second_run.getCardio()
    # Split a (timestamp, bpm) series into parallel offset/bpm sequences.
    def split_series(start, series):
        offsets = map(lambda sample: (sample[0] - start).seconds, series)
        rates = map(lambda sample: sample[1], series)
        return offsets, rates
    start = cardio1[0][0]
    t1, hr1 = split_series(start, cardio1)
    t2, hr2 = split_series(start, cardio2)
    lines = plt.plot(t1, hr1, 'r', t2, hr2, 'b')
    plt.ylabel("Heart Rate [bpm]")
    plt.xlabel("Seconds from begining")
    plt.title("Heart Rate Monitor Comparison")
    plt.grid(True)
    plt.figlegend((lines), (descriptor1, descriptor2), 'lower right')
    plt.show()
# if __name__ == "__ma | in__":
# compare_hr_run(
# 'data/Garmin.gpx', 'data/Microsoft.gpx', 'Garmin' | , 'MS Band')
|
FireballDWF/cloud-custodian | tools/c7n_azure/c7n_azure/constants.py | Python | apache-2.0 | 4,200 | 0.00119 | # Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Azure Functions
"""
# Docker version from https://hub.docker.com/r/microsoft/azure-functions/
FUNCTION_DOCKER_VERSION = 'DOCKER|mcr.microsoft.com/azure-functions/python:2.0-python3.6-appservice'
FUNCTION_EXT_VERSION = '~2'
FUNCTION_EVENT_TRIGGER_MODE = 'azure-event-grid'
FUNCTION_TIME_TRIGGER_MODE = 'azure-periodic'
FUNCTION_KEY_URL = 'hostruntime/admin/host/systemkeys/_master?api-version=2018-02-01'
FUNCTION_CONSUMPTION_BLOB_CONTAINER = 'cloud-custodian-packages'
FUNCTION_PACKAGE_SAS_EXPIRY_DAYS = 365 * 10 # 10 years
FUNCTION_AUTOSCALE_NAME = 'cloud_custodian_default'
"""
Azure Container Host
"""
CONTAINER_EVENT_TRIGGER_MODE = 'container-event'
CONTAINER_TIME_TRIGGER_MODE = 'container-periodic'
ENV_CONTAINER_EVENT_QUEUE_ID = 'AZURE_EVENT_QUEUE_RESOURCE_ID'
ENV_CONTAINER_EVENT_QUEUE_NAME = 'AZURE_EVENT_QUEUE_NAME'
ENV_CONTAINER_POLICY_STORAGE = 'AZURE_CONTAINER_STORAGE'
ENV_CONTAINER_OPTION_LOG_GROUP = 'AZURE_CONTAINER_LOG_GROUP'
ENV_CONTAINER_OPTION_METRICS = 'AZURE_CONTAINER_METRICS'
ENV_CONTAINER_OPTION_OUTPUT_DIR = 'AZURE_CONTAINER_OUTPUT_DIR'
"""
Event Grid Mode
"""
EVENT_GRID_UPN_CLAIM_JMES_PATH = \
'data.claims."http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn"'
EVENT_GRID_SP_NAME_JMES_PATH = 'data.claims.appid'
EVENT_GRID_SERVICE_ADMIN_JMES_PATH = \
'data.claims."http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"'
EVENT_GRID_P | RINCIPAL_TYPE_JMES_PATH = 'data.authorization.evidence.principalType'
EVENT_GRID_PRINCIPAL_ROLE_JMES_PATH = 'data.authorization.evidence.role'
EVENT_GRID_EVENT_TIME_PATH = 'eventTime'
"""
Environment Variables
"""
ENV_TENANT_ID = 'AZURE_TENANT_ID'
ENV_CLIENT_ID = 'AZURE_CLIENT_ID'
ENV_SUB_ID = 'AZURE_SUBSCRIPTION_ID'
ENV_CLIENT_SECRET = 'AZURE_CLIENT_SECRET'
ENV_KEYVAULT_CLIENT_ID = 'AZURE_KEYVAULT_CLIENT_ID'
ENV_KEYVAULT_SECRET_ID = 'AZURE_KEYVAULT_SECRET'
ENV_ACCESS_TOKEN = 'AZURE_ACCESS_TOKEN | '
ENV_USE_MSI = 'AZURE_USE_MSI'
ENV_FUNCTION_TENANT_ID = 'AZURE_FUNCTION_TENANT_ID'
ENV_FUNCTION_CLIENT_ID = 'AZURE_FUNCTION_CLIENT_ID'
ENV_FUNCTION_CLIENT_SECRET = 'AZURE_FUNCTION_CLIENT_SECRET'
ENV_FUNCTION_SUB_ID = 'AZURE_FUNCTION_SUBSCRIPTION_ID'
ENV_FUNCTION_MANAGEMENT_GROUP_NAME = 'AZURE_FUNCTION_MANAGEMENT_GROUP_NAME'
# Allow disabling SSL cert validation (ex: custom domain for ASE functions)
ENV_CUSTODIAN_DISABLE_SSL_CERT_VERIFICATION = 'CUSTODIAN_DISABLE_SSL_CERT_VERIFICATION'
"""
Authentication Resource
"""
RESOURCE_ACTIVE_DIRECTORY = 'https://management.core.windows.net/'
RESOURCE_STORAGE = 'https://storage.azure.com/'
RESOURCE_VAULT = 'https://vault.azure.net'
"""
Threading Variable
"""
DEFAULT_MAX_THREAD_WORKERS = 3
DEFAULT_CHUNK_SIZE = 20
"""
Custom Retry Code Variables
"""
DEFAULT_MAX_RETRY_AFTER = 30
"""
KeyVault url templates
"""
TEMPLATE_KEYVAULT_URL = 'https://{0}.vault.azure.net'
"""
Azure Functions Host Configuration
"""
FUNCTION_HOST_CONFIG = {
"version": "2.0",
"healthMonitor": {
"enabled": True,
"healthCheckInterval": "00:00:10",
"healthCheckWindow": "00:02:00",
"healthCheckThreshold": 6,
"counterThreshold": 0.80
},
"functionTimeout": "00:05:00",
"logging": {
"fileLoggingMode": "debugOnly"
},
"extensions": {
"http": {
"routePrefix": "api",
"maxConcurrentRequests": 5,
"maxOutstandingRequests": 30
}
}
}
FUNCTION_EXTENSION_BUNDLE_CONFIG = {
"id": "Microsoft.Azure.Functions.ExtensionBundle",
"version": "[1.*, 2.0.0)"
}
"""
Azure Storage
"""
BLOB_TYPE = 'blob'
QUEUE_TYPE = 'queue'
TABLE_TYPE = 'table'
FILE_TYPE = 'file'
|
PyKudos/KudoEdit | KudoEdit/KudoEdit.py | Python | mit | 12,273 | 0.011081 | import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import *
import os
class Window(QtGui.QMainWindow):
    def __init__(self):
        # Qt base-class initialisation must run before any attribute is
        # set on the window.
        super(Window, self).__init__()
        # No document is associated with the editor at startup.
        self.filename = None
        self.initUI()
    def initUI(self):
        """Build the complete UI: actions, tab widget, toolbar and menus."""
        # Toggle state for the Italic/Underline formatting buttons.
        self.italic_flag = False
        self.underline_flag = False
        # Icons live in an "icons" directory next to this source file.
        self.path = os.path.abspath(__file__)
        self.icon_path = "/".join(self.path.split("/")[:-1]+["icons"])
        # One QAction per editor command; add_action wires icon, shortcut,
        # status tip and slot in one call.
        self.exitclick = self.add_action("Exit", "Ctrl+Q",
                        "/".join([self.icon_path,"exit_icon.png"]),
                        qApp.quit)
        self.newclick = self.add_action("New", "Ctrl+N",
                        "/".join([self.icon_path,"new_icon.png"]),
                        self.newfile)
        self.openclick = self.add_action("Open", "Ctrl+O",
                        "/".join([self.icon_path,"open_icon.png"]),
                        self.openfile)
        self.saveclick = self.add_action("Save", "Ctrl+S",
                        "/".join([self.icon_path,"save_icon.png"]),
                        self.savefile)
        self.saveasclick = self.add_action("SaveAs", "Ctrl+Shift+S",
                        "/".join([self.icon_path,"save_as_icon.gif"]),
                        self.save_asfile)
        self.copyclick = self.add_action("Copy", "Ctrl+C",
                        "/".join([self.icon_path,"copy_icon.png"]),
                        self.copy)
        self.pasteclick = self.add_action("Paste", "Ctrl+V",
                        "/".join([self.icon_path,"paste_icon.jpg"]),
                        self.paste)
        #self.printclick = self.add_action("Print", "Ctrl+P",
        #                "/".join([self.icon_path,"print_icon.jpg"]),
        #                self.printclick)
        # NOTE(review): 'self' (the main window) is passed where add_action
        # expects an icon *path*; QIcon(self) cannot yield a usable icon --
        # probably meant None or an icon file path.  Confirm before use.
        self.close_tab_click = self.add_action("Close", "Ctrl+W",
                        self,
                        self.close_tab)
        self.italicclick = self.add_action("Italic", "Ctrl+I",
                        "/".join([self.icon_path,"italic_icon.png"]),
                        self.italic)
        self.boldclick = self.add_action("Bold", "Ctrl+B",
                        "/".join([self.icon_path,"bold_icon.png"]),
                        self.bold)
        self.underlineclick = self.add_action("Underline", "Ctrl+U",
                        "/".join([self.icon_path,"underline_icon.png"]),
                        self.underline)
        # NOTE(review): 'tab', 'textEditf' and 'layout' below are created
        # but never attached to the tab widget -- they look like dead code.
        tab = QTextEdit()
        self.tab_widget = QTabWidget()
        # NOTE(review): tabsClosable() only *queries* the flag; showing
        # close buttons would require setTabsClosable(True).  Confirm.
        self.tab_widget.tabsClosable()
        textEditf = QFont()
        layout = QVBoxLayout(tab)
        # NOTE(review): tabCloseRequested(int) passes an index, but
        # close_tab() accepts no argument -- verify this connection works.
        QtCore.QObject.connect(self.tab_widget,
                               QtCore.SIGNAL('tabCloseRequested(int)'),
                               self.close_tab)
        self.setCentralWidget(self.tab_widget)
        self.statusBar()
        # Toolbar: one button per action, same order as declared above.
        self.toolbar = self.addToolBar('New')
        self.toolbar.addAction(self.newclick)
        self.toolbar.addAction(self.saveclick)
        self.toolbar.addAction(self.saveasclick)
        self.toolbar.addAction(self.openclick)
        self.toolbar.addAction(self.exitclick)
        self.toolbar.addAction(self.copyclick)
        self.toolbar.addAction(self.pasteclick)
        self.toolbar.addAction(self.boldclick)
        self.toolbar.addAction(self.italicclick)
        self.toolbar.addAction(self.underlineclick)
        # Menu bar mirrors the toolbar, grouped File / Edit / View.
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('File')
        fileMenu.addAction(self.newclick)
        fileMenu.addAction(self.openclick)
        fileMenu.addAction(self.saveclick)
        fileMenu.addAction(self.saveasclick)
        fileMenu.addAction(self.close_tab_click)
        #fileMenu.addAction(printclick)
        fileMenu.addAction(self.exitclick)
        editMenu = menubar.addMenu('Edit')
        editMenu.addAction(self.copyclick)
        editMenu.addAction(self.pasteclick)
        viewMenu = menubar.addMenu('View')
        viewMenu.addAction(self.italicclick)
        viewMenu.addAction(self.boldclick)
        viewMenu.addAction(self.underlineclick)
        self.showMaximized()
        self.show()
def add_action(self, action_name, shortcut=None, icon_path=None, trigger_action=None ):
action = QAction(QIcon(icon_path), action_name, self)
action.setShortcut(shortcut)
action.setStatusTip(action_name)
action.triggered.connect(trigger_action)
return action
def keyReleaseEvent(self, e):
tab_index = self.tab_widget.currentIndex()
tabText = self.tab_widget.tabText(tab_index)
self.tab_widget.tabBar().setTabTextColor(tab_index,
QColor(255,0,0))
if tab_index < 0:
return
if tabText != "untitled*" and tabText[-1] != "*":
tabText = tabText+"*"
self.tab_widget.setTabText(tab_index,tabText)
def close_tab(self):
print "closing tab"
tab_index = self.tab_widget.currentIndex()
if tab_index < 0:
qApp.quit()
return
tabText = self.tab_widget.tabText(tab_index)
if tabText[-1] == "*":
msgBox = QMessageBox()
msgBox.setText("The document has been modified.")
msgBox.setInformativeText("Do you want to save your changes?")
msgBox.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
msgBox.setDefaultButton(QMessageBox.Save)
ret = msgBox.exec_()
if ret == QMessageBox.Save:
self.savefile()
self.close_tab()
elif ret == QMessageBox.Discard:
pass
elif ret == QMessageBox.Cancel:
return
self.tab_widget.removeTab(tab_index)
"""
def printfile(self):
#print_cmd = 'lp -d NetPrinter filename'
text=self.textEdit.toPlainText()
os.popen(str(text))
#self.textEdit.print_(os.printer)
"""
    def italic(self):
        """Toggle italic mode for the current editor and its toolbar icon."""
        italic_button = self.toolbar.widgetForAction(self.italicclick)
        italic_icon = QIcon("/".join([self.icon_path,"italic_icon.png"]))
        print self.italic_flag
        # Render the icon in Disabled/Active mode to visualise the toggle
        # state on the toolbar button.
        if not self.italic_flag:
            new_pixmap = italic_icon.pixmap(QtCore.QSize(20,20),QIcon.Disabled,QIcon.On)
        else:
            new_pixmap = italic_icon.pixmap(QtCore.QSize(20,20),QIcon.Active, QIcon.On)
        new_icon = QIcon(new_pixmap)
        italic_button.setIcon(new_icon)
        tab_index = self.tab_widget.currentIndex()
        textEdit = self.tab_widget.widget(tab_index)
        # NOTE(review): the icon above is updated even when no tab is open,
        # so the button state can drift from the (unchanged) flag below.
        if not textEdit:
            return
        # Apply the new italic state to the editor, then record it.
        textEdit.setFontItalic(not self.italic_flag)
        self.italic_flag = not self.italic_flag
    def bold(self):
        """Toggle bold font weight for the current editor and its icon."""
        bold_button = self.toolbar.widgetForAction(self.boldclick)
        bold_icon = QIcon("/".join([self.icon_path,"bold_icon.png"]))
        tab_index = self.tab_widget.currentIndex()
        textEdit = self.tab_widget.widget(tab_index)
        if not textEdit:
            return
        font_weight = textEdit.fontWeight()
        # 50 / 75 are QFont.Normal / QFont.Bold; the editor's current
        # weight drives both the new weight and the button icon state.
        if font_weight == 50:
            new_pixmap = bold_icon.pixmap(QtCore.QSize(20,20),QIcon.Disabled,QIcon.On)
            font_weight = 75
            textEdit.setFontWeight(font_weight)
        else:
            new_pixmap = bold_icon.pixmap(QtCore.QSize(20,20),QIcon.Active, QIcon.On)
            font_weight = 50
            textEdit.setFontWeight(font_weight)
        new_icon = QIcon(new_pixmap)
        bold_button.setIcon(new_icon)
def underline(self):
tab_index = self.tab_widget.c |
rezoo/chainer | chainer/datasets/concatenated_dataset.py | Python | mit | 939 | 0 | from chainer.dataset import dataset_mixin
class ConcatenatedDataset(dataset_mixin.DatasetMixin):
    """Dataset that concatenates several underlying datasets.

    The wrapped datasets are exposed as one flat dataset whose length is
    the sum of the individual lengths: a 10-sample dataset followed by a
    20-sample dataset behaves as a single 30-sample dataset.

    Args:
        datasets: The underlying datasets.  Each has to support
            :meth:`__len__` and :meth:`__getitem__`.
    """

    def __init__(self, *datasets):
        self._datasets = datasets

    def __len__(self):
        total = 0
        for ds in self._datasets:
            total += len(ds)
        return total

    def get_example(self, i):
        if i < 0:
            raise IndexError
        remaining = i
        # Walk the datasets, discounting each one's length until the
        # requested index falls inside the current dataset.
        for ds in self._datasets:
            size = len(ds)
            if remaining < size:
                return ds[remaining]
            remaining -= size
        raise IndexError
|
basmot/futsal_management | base/models/account_transaction.py | Python | apache-2.0 | 1,157 | 0.001729 | ##############################################################################
#
# Copyright 2015-2016 Bastien Mottiaux
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Lic | ense at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################################################################### | #######
from django.db import models
from django.contrib import admin
from django.utils import timezone
class AccountTransactionAdmin(admin.ModelAdmin):
    # Columns shown in the Django admin change list for AccountTransaction.
    list_display = ('account', 'transaction')
class AccountTransaction(models.Model):
    """Join model linking an Account to one of its Transactions."""
    account = models.ForeignKey('Account')
    transaction = models.ForeignKey('Transaction')

    def __str__(self):
        # BUG FIX: the original referenced the bare name 'transaction'
        # (NameError) and '+'-concatenated model instances with a str;
        # format both related objects through str() instead.
        return "{0} - {1}".format(self.account, self.transaction)
|
LuoZijun/uOffice | temp/pydocxx/docx/opc/pkgreader.py | Python | gpl-3.0 | 10,107 | 0 | # encoding: utf-8
"""
Provides a low-level, read-only API to a serialized Open Packaging Convention
(OPC) package.
"""
from __future__ import absolute_import
from .constants import RELATIONSHIP_TARGET_MODE as RTM
from .oxml import parse_xml
from .packuri import PACKAGE_URI, PackURI
from .phys_pkg import PhysPkgReader
from .shared import CaseInsensitiveDict
class PackageReader(object):
    """
    Provides access to the contents of a zip-format OPC package via its
    :attr:`serialized_parts` and :attr:`pkg_srels` attributes.
    """
    def __init__(self, content_types, pkg_srels, sparts):
        # NOTE(review): *content_types* is accepted but never stored --
        # content types are only consulted in from_file() while building
        # the serialized parts.  Confirm before changing the signature.
        super(PackageReader, self).__init__()
        self._pkg_srels = pkg_srels
        self._sparts = sparts
    @staticmethod
    def from_file(pkg_file):
        """
        Return a |PackageReader| instance loaded with contents of *pkg_file*.
        """
        phys_reader = PhysPkgReader(pkg_file)
        # [Content_Types].xml maps each part name to its content type.
        content_types = _ContentTypeMap.from_xml(phys_reader.content_types_xml)
        # Package-level relationships are the roots of the part graph.
        pkg_srels = PackageReader._srels_for(phys_reader, PACKAGE_URI)
        sparts = PackageReader._load_serialized_parts(
            phys_reader, pkg_srels, content_types
        )
        phys_reader.close()
        return PackageReader(content_types, pkg_srels, sparts)
    def iter_sparts(self):
        """
        Generate a 4-tuple `(partname, content_type, reltype, blob)` for each
        of the serialized parts in the package.
        """
        for s in self._sparts:
            yield (s.partname, s.content_type, s.reltype, s.blob)
    def iter_srels(self):
        """
        Generate a 2-tuple `(source_uri, srel)` for each of the relationships
        in the package.
        """
        # Package-level relationships first, then each part's.
        for srel in self._pkg_srels:
            yield (PACKAGE_URI, srel)
        for spart in self._sparts:
            for srel in spart.srels:
                yield (spart.partname, srel)
    @staticmethod
    def _load_serialized_parts(phys_reader, pkg_srels, content_types):
        """
        Return a list of |_SerializedPart| instances corresponding to the
        parts in *phys_reader* accessible by walking the relationship graph
        starting with *pkg_srels*.
        """
        sparts = []
        part_walker = PackageReader._walk_phys_parts(phys_reader, pkg_srels)
        for partname, blob, reltype, srels in part_walker:
            content_type = content_types[partname]
            spart = _SerializedPart(
                partname, content_type, reltype, blob, srels
            )
            sparts.append(spart)
        # Returned as an immutable tuple even though a list is built.
        return tuple(sparts)
    @staticmethod
    def _srels_for(phys_reader, source_uri):
        """
        Return |_SerializedRelationships| instance populated with
        relationships for source identified by *source_uri*.
        """
        rels_xml = phys_reader.rels_xml_for(source_uri)
        return _SerializedRelationships.load_from_xml(
            source_uri.baseURI, rels_xml)
    @staticmethod
    def _walk_phys_parts(phys_reader, srels, visited_partnames=None):
        """
        Generate a 4-tuple `(partname, blob, reltype, srels)` for each of the
        parts in *phys_reader* by walking the relationship graph rooted at
        srels.
        """
        # The visited list is threaded through the recursion so each part
        # is yielded once even when the relationship graph has cycles.
        if visited_partnames is None:
            visited_partnames = []
        for srel in srels:
            # External relationships point outside the package; skip them.
            if srel.is_external:
                continue
            partname = srel.target_partname
            if partname in visited_partnames:
                continue
            visited_partnames.append(partname)
            reltype = srel.reltype
            part_srels = PackageReader._srels_for(phys_reader, partname)
            blob = phys_reader.blob_for(partname)
            yield (partname, blob, reltype, part_srels)
            # Recurse depth-first into the part just yielded.
            next_walker = PackageReader._walk_phys_parts(
                phys_reader, part_srels, visited_partnames
            )
            for partname, blob, reltype, srels in next_walker:
                yield (partname, blob, reltype, srels)
class _ContentTypeMap(object):
    """
    Dictionary-like lookup of content type by part name, e.g.
    ``content_type = cti['/ppt/presentation.xml']``.
    """
    def __init__(self):
        super(_ContentTypeMap, self).__init__()
        self._overrides = CaseInsensitiveDict()
        self._defaults = CaseInsensitiveDict()

    def __getitem__(self, partname):
        """
        Return the content type registered for *partname*.
        """
        if not isinstance(partname, PackURI):
            tmpl = "_ContentTypeMap key must be <type 'PackURI'>, got %s"
            raise KeyError(tmpl % type(partname))
        # An explicit per-part override wins over the extension default.
        try:
            return self._overrides[partname]
        except KeyError:
            pass
        try:
            return self._defaults[partname.ext]
        except KeyError:
            pass
        tmpl = "no content type for partname '%s' in [Content_Types].xml"
        raise KeyError(tmpl % partname)

    @staticmethod
    def from_xml(content_types_xml):
        """
        Return a new |_ContentTypeMap| populated from *content_types_xml*.
        """
        ct_map = _ContentTypeMap()
        types_elm = parse_xml(content_types_xml)
        for override in types_elm.overrides:
            ct_map._add_override(override.partname, override.content_type)
        for default in types_elm.defaults:
            ct_map._add_default(default.extension, default.content_type)
        return ct_map

    def _add_default(self, extension, content_type):
        """
        Register *content_type* as the default for parts whose name ends in
        *extension*.
        """
        self._defaults[extension] = content_type

    def _add_override(self, partname, content_type):
        """
        Register *content_type* as the explicit type of the part named
        *partname*.
        """
        self._overrides[partname] = content_type
class _SerializedPart(object):
"""
Value object for an OPC package part. Provides access to the partname,
content type, blob, and serialized relationships for the part.
"""
def __init__(self, partname, content_type, reltype, blob, srels):
super(_SerializedPart, self).__init__()
self._partname = partname
self._content_type = content_type
self._reltype = reltype
self._blob = blob
self._srels = srels
@property
def partname(self):
return self._partname
@property
def content_type(self):
return self._content_type
@property
def blob(self):
return self._blob
@property
def reltype(self):
"""
The referring relationship type of this part.
"""
return self._reltype
@property
def srels(self):
return self._srels
class _SerializedRelationship(object):
"""
Value object representing a serialized relationship in an OPC package.
Serialized, in this case, means any target part is referred to via its
partname rather than a direct link to an in-memory |Part| object.
"""
def __init__(self, baseURI, rel_elm):
super(_SerializedRelationship, self).__init__()
self._baseURI = baseURI
self._rId = rel_elm.rId
self._reltype = rel_elm.reltype
self._target_mode = rel_elm.target_mode
self._target_ref = rel_elm.target_ref
@property
def is_external(self):
"""
True if target_mode is ``RTM.EXTERNAL``
"""
return self._target_mode == RTM.EXTERNAL
@property
def reltype(self):
"""Relationship type, like ``RT.OFFICE_DOCUMENT``"""
return self._reltype
@property
def rId(self):
"""
Relationship id, like 'rId9', corresponds to the ``Id`` attribute on
the ``CT_Relationship`` element.
"""
return self._rId
@property
def target_mode(self):
"""
String in ``TargetMode`` attribute of ``CT_Relationship`` element,
one of ``RTM.INTERNAL`` or ``RTM.EXTERNAL``.
"""
return self._target_mode
@property
def target_ref(self):
"""
String in ``Target`` attribute of ``CT_Relati |
fsimkovic/cptbx | conkit/io/_parser.py | Python | gpl-3.0 | 3,072 | 0.001953 | # BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parent classes for all parser classes
"""
__author__ = "Felix Simkovic"
__date__ = "04 Oct 2016"
__version__ = "0.1"
import abc
ABC = abc.ABCMeta("ABC", (object,), {})
from conkit.core.contact import Contact
from conkit.core.contactmap import C | ontactMap
from conkit.core.contactfile import ContactFile
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
class Parser(ABC):
    """Abstract base class for all parsers."""

    @abc.abstractmethod
    def read(self):
        pass

    @abc.abstractmethod
    def write(self):
        pass

    @classmethod
    def _reconstruct(cls, hierarchy):
        """Re-construct the full hierarchy when only a part is provided.

        A bare Contact/ContactMap (or Sequence) is wrapped in default
        "conkit"-named parent containers; a full file hierarchy is
        returned unchanged.

        Raises TypeError for unsupported hierarchy types (BUG FIX: the
        original fell through with ``h`` unbound, raising NameError).
        """
        if isinstance(hierarchy, ContactFile):
            return hierarchy
        if isinstance(hierarchy, ContactMap):
            h = ContactFile("conkit")
            h.add(hierarchy)
            return h
        if isinstance(hierarchy, Contact):
            h = ContactFile("conkit")
            m = ContactMap("1")
            m.add(hierarchy)
            h.add(m)
            return h
        if isinstance(hierarchy, SequenceFile):
            return hierarchy
        if isinstance(hierarchy, Sequence):
            h = SequenceFile("conkit")
            h.add(hierarchy)
            return h
        raise TypeError("Unsupported hierarchy type: %s" % type(hierarchy))
class ContactFileParser(Parser):
    """General purpose class for all contact file parsers"""
    # Marker base class: concrete contact-file parsers subclass this; no
    # behaviour is added beyond the abstract Parser interface.
    pass
class SequenceFileParser(Parser):
    """General purpose class for all sequence file parsers"""
    # Marker base class: concrete sequence-file parsers subclass this; no
    # behaviour is added beyond the abstract Parser interface.
    pass
|
letsencrypt/letsencrypt | certbot-apache/certbot_apache/_internal/override_suse.py | Python | apache-2.0 | 670 | 0 | """ Distribution specific override class for OpenSUSE | """
from certbot_apache._internal import configurator
from certbot_apache._internal.configurator import OsOptions
class OpenSUSEConfigurator(configurator.Apache | Configurator):
"""OpenSUSE specific ApacheConfigurator override class"""
OS_DEFAULTS = OsOptions(
vhost_root="/etc/apache2/vhosts.d",
vhost_files="*.conf",
ctl="apachectl",
version_cmd=['apachectl', '-v'],
restart_cmd=['apachectl', 'graceful'],
conftest_cmd=['apachectl', 'configtest'],
enmod="a2enmod",
dismod="a2dismod",
challenge_location="/etc/apache2/vhosts.d",
)
|
johnttaylor/Outcast | bin/scm/rm.py | Python | bsd-3-clause | 1,756 | 0.007403 | # Short help
def display_summary():
    """Print the one-line summary for the 'rm' command.

    The command name is left-justified in a 13-column field so summaries
    from all commands line up in the overall help listing.
    """
    summary = "Removes a previously copied SCM Repository"
    print("{:<13}{}".format('rm', summary))
# DOCOPT command line definition.
# BUG FIX: corrected user-facing typos in the help text ("respostiory",
# "to be remove", "in dependent on", "only applied to").
USAGE="""
Removes a previously 'copied' repository
===============================================================================
usage: evie [common-opts] rm [options] <dst> <repo> <origin> <id>
       evie [common-opts] rm [options] get-success-msg
       evie [common-opts] rm [options] get-error-msg
Arguments:
    <dst>            PARENT directory for where the package was copied.  The
                     directory is specified as a relative path to the root
                     of primary repository.
    <repo>           Name of the repository to remove
    <origin>         Path/URL to the repository
    <id>             Label/Tag/Hash/Version of code to be removed
    get-success-msg  Returns a SCM specific message that informs the end user
                     of additional action(s) that may be required when
                     the command is successful
    get-error-msg    Returns a SCM specific message that informs the end user
                     of additional action(s) that may be required when
                     the command fails
Options:
    -p PKGNAME       Specifies the Package name if different from the <repo>
                     name
    -b BRANCH        Specifies the source branch in <repo>.  The use/need
                     of this option is dependent on the <repo> SCM type.
Options:
    -h, --help       Display help for this command
Notes:
    o The command MUST be run in the root of the primary repository.
    o This command only applies to repositories previously mounted using
      the 'copy' command.
"""
rsmz/copyright | test/test_walk.py | Python | gpl-3.0 | 2,190 | 0.00137 | import os
import unittest
from copyright import Walk
class TestWalk(unittest.TestCase):
DIRS = [
'./tmp/dir1/dir2',
'./tmp/dir3'
]
FILES = [
'./tmp/f',
'./tmp/dir1/f1',
'./tmp/dir3/f3',
'./tmp/dir1/dir2/f2'
]
def setUp(self):
for d in self.DIRS:
os.makedirs(d)
for p in self.FILES:
with open(p, 'w') as f:
pass
def tearDown(self):
for p in self.FILES:
if os.path.exists(p):
os.remove(p)
for d in self.DIRS:
if os.path.exists(d):
os.removedirs(d)
def test_default(self):
files = []
for f in Walk():
files.append(f)
for f in self.FILES:
self.assertTrue(f in files)
def test_exclude(self):
files = []
exclude = ['f1', 'f2']
for f in Walk(exclude=exclude):
files.append(f)
self.assertTrue(self.FILES[0] in files)
self.assertTrue(self.FILES[2] in files)
self.assertFalse(self.FILES[1] in files)
self.assertFalse(self.FILES[3] in files)
def test_include(self):
files = []
include = ['*1', 'di | r2']
for f i | n Walk(include=include):
files.append(f)
self.assertFalse(self.FILES[0] in files)
self.assertTrue(self.FILES[1] in files)
self.assertFalse(self.FILES[2] in files)
self.assertTrue(self.FILES[3] in files)
def test_include_regex(self):
files = []
include = ['f[\d]$']
for f in Walk(include=include, regex=True):
files.append(f)
self.assertTrue(self.FILES[1] in files)
self.assertTrue(self.FILES[2] in files)
self.assertTrue(self.FILES[3] in files)
self.assertFalse(self.FILES[0] in files)
def test_path(self):
files = []
for f in Walk(path='./tmp'):
files.append(f)
for f in self.FILES:
self.assertTrue(f in files)
if __name__ == '__main__':
    # Run the suite with verbose output when executed directly.
    unittest.main(verbosity=2)
|
tszym/ansible | lib/ansible/modules/network/netscaler/netscaler_gslb_site.py | Python | gpl-3.0 | 14,153 | 0.00318 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: netscaler_gslb_site
short_description: Manage gslb site entities in Netscaler.
description:
- Manage gslb site entities in Netscaler.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
sitename:
description:
- >-
Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the virtual server is created.
- "Minimum length = 1"
sitetype:
choices:
- 'REMOTE'
- 'LOCAL'
description:
- >-
Type of site to create. If the type is not specified, the appliance automatically detects and sets
the type on the basis of the IP address being assigned to the site. If the specified site IP address
is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site.
Otherwise, it is a remote site.
siteipaddress:
description:
- >-
IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB
sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or
MIP address, or the IP address of the ADNS service).
- "Minimum length = 1"
publicip:
description:
- >-
Public IP address for the local site. Required only if the appliance is deployed in a private address
space and the site has a public IP address hosted on an external firewall or a NAT device.
- "Minimum length = 1"
metricexchange:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The
appliances in the GSLB setup exchange health information once every second.
- >-
If you disable metrics exchange, you can use only static load balancing methods (such as round robin,
static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load
balancing method (such as least connection) is in operation, the appliance falls back to round robin.
Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB
services. Otherwise, the service is marked as DOWN.
nwmetricexchange:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from
communications with various local DNS (LDNS) servers used by clients. RTT information is used in the
dynamic RTT load balancing method, and is exchanged every 5 seconds.
sessionexchange:
choices:
- 'enabled'
- 'disabled'
description:
- "Exchange persistent session entries with other GSLB sites every five seconds."
triggermonitor:
choices:
- 'ALWAYS'
- 'MEPDO | WN'
- 'MEPDOWN_SVCDOWN'
description:
- >-
| Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound.
Available settings function as follows:
- "* C(ALWAYS) - Monitor the GSLB service at all times."
- >-
* C(MEPDOWN) - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange
Protocol (MEP) is disabled.
- "C(MEPDOWN_SVCDOWN) - Monitor the service in either of the following situations:"
- "* The exchange of metrics through MEP is disabled."
- >-
* The exchange of metrics through MEP is enabled but the status of the service, learned through
metrics exchange, is DOWN.
parentsite:
description:
- "Parent site of the GSLB site, in a parent-child topology."
clip:
description:
- >-
Cluster IP address. Specify this parameter to connect to the remote cluster site for GSLB auto-sync.
Note: The cluster IP address is defined when creating the cluster.
publicclip:
description:
- >-
IP address to be used to globally access the remote cluster when it is deployed behind a NAT. It can
be same as the normal cluster IP address.
naptrreplacementsuffix:
description:
- >-
The naptr replacement suffix configured here will be used to construct the naptr replacement field in
NAPTR record.
- "Minimum length = 1"
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup gslb site
delegate_to: localhost
netscaler_gslb_site:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
sitename: gslb-site-1
siteipaddress: 192.168.1.1
sitetype: LOCAL
publicip: 192.168.1.1
metricexchange: enabled
nwmetricexchange: enabled
sessionexchange: enabled
triggermonitor: ALWAYS
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: string
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dictionary
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite import gslbsite
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection,
)
def gslb_site_exists(client, module):
if gslbsite.count_filtered(client, 'sitename:%s' % module.params['sitename']) > 0:
return True
else:
return False
def gslb_site_identical(client, module, gslb_site_proxy):
gslb_site_list = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename'])
diff_dict = gslb_site_proxy.diff_object(gslb_site_list[0])
if len(diff_dict) == 0:
return True
else:
return False
def diff_list(client, module, gslb_site_proxy):
gslb_site_list = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename'])
return gslb_site_proxy.diff_object(gslb_site_list[0])
def main():
module_specific_arguments = dict(
sitename=dict(type='str'),
sitetype=dict(
type='str',
choices=[
'REMOTE',
'LOCAL',
]
),
siteipaddress=dict(type='str'),
publicip=dict(type='str'),
metricexchange=dict(
type='str',
|
jbms/beancount-import | beancount_import/source/ofx_test.py | Python | gpl-2.0 | 2,503 | 0.0004 | import os
import pytest
from . import ofx
from .source_test import check_source_example
testdata_dir = os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'source', 'ofx'))
examples = [
('test_vanguard_basic', 'vanguard.ofx'),
('test_vanguard_matching', 'vanguard.ofx'),
('test_vanguard_invalid', 'vanguard.ofx'),
('test_vanguard_with_cash_account', 'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_transfer',
'vanguard.ofx'),
('test_vanguard_with_cash_account_matching_missing_primary',
'vanguard.ofx'),
('test_vanguard401k', 'vanguard401k.ofx'),
('test_vanguard_401k_matching', 'vanguard401k.ofx'),
('test_vanguard_xfer_in', 'vanguard_xfer_in.ofx'),
('test_fidelity_savings', 'fidelity-savings.ofx'),
('test_suncorp', 'suncorp.ofx'),
('test_checking', 'checking.ofx'),
('test_checking_emptyledgerbal', 'checking-emptyledgerbal.ofx'),
('test_td_ameritrade', 'td_ameritrade.ofx'),
('test_anzcc', 'anzcc.ofx'),
('test_multiple_accounts', 'multiple_accounts.ofx'),
('test_bank_medium', 'bank_medium.ofx'),
('test_investment_401k', 'investment_401k.ofx'),
('test_investment_buy_sell_income', 'investment_buy_sell_income.ofx'),
('test_vanguard_roth_ira', 'vanguard_roth_ira.ofx'),
('test_vanguard_roth_ira_matching', 'vanguard_roth_ira.ofx'),
('test_checking2', 'checking2.ofx'),
('test_checking2_matching', 'checking2.ofx'),
('test_amex', 'amex.ofx'),
('test_fidelity', 'fidelity.ofx'),
('test_non_default_capital_gains', 'vanguard401k.ofx'),
]
@pytest.mark.parametrize('name,ofx_filename', examples)
def test_source(name: str, ofx_filename: str):
check_source_example(
example_dir=os.path.join(testdata_dir, name),
source_spec={
'module': 'beancount_import.source.ofx',
'ofx_filenames': [os.path.join(testdata_dir, ofx_filename)],
},
replacements=[(testdata_dir, '<testdata>')])
def test_find_ofx_id_for_account():
ofx_ids = {
'Assets:Vanguard:401k': 1,
}
for (account, want) in [
| ('Assets:Vanguard:401k:PreTax:VGI1', 1),
('Assets:Vanguard:401k:PreTax', 1),
('Assets:Vanguard:401k:VG1', 1),
('Assets:Vanguard:401k', 1),
('Assets:Vanguard:Unknown', None),
('Assets:Vanguard:401k:PreTax:Excessive:VGI1', None),
]:
assert ofx.find_o | fx_id_for_account(account, ofx_ids) == want, account
|
daftspaniel/daftpyweek17 | jsonpickle/pickler.py | Python | bsd-3-clause | 10,329 | 0.000775 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import operator
import jsonpickle.util as util
import jsonpickle.tags as tags
import jsonpickle.handlers as handlers
from jsonpickle.backend import JSONBackend
from jsonpickle.compat import unicode
def encode(value,
unpicklable=False, make_refs=True, keys=False,
max_depth=None, reset=True,
backend=None, context=None):
backend = _make_backend(backend)
if context is None:
context = Pickler(unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth)
return backend.encode(context.flatten(value, reset=reset))
def _make_backend(backend):
if backend is None:
return JSONBackend()
else:
return backend
class Pickler(object):
def __init__(self,
unpicklable=True, make_refs=True, max_depth=None,
backend=None, keys=False):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = _make_backend(backend)
self.keys = keys
## The current recursion depth
self._depth = -1
## The maximal recursion depth
self._max_depth = max_depth
## Maps id(obj) to reference IDs
self._objs = {}
def reset(self):
self._objs = {}
self._depth = -1
def _push(self):
"""Steps down one level in the namespace.
"""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _mkref(self, obj):
objid = id(obj)
if objid not in self._objs:
new_id = len(self._objs)
self._objs[objid] = new_id
return True
# Do not use references if not unpicklable.
if not self.unpicklable or not self.make_refs:
return True
else:
return False
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world')
'hello world'
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'})
{'key': 'value'}
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
self._push()
max_reached = self._depth == self._max_depth
if max_reached or (not self.make_refs and id(obj) in self._objs):
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
return self._pop(flatten_func(obj))
def _get_flattener(self, obj):
if util.is_primitive(obj):
return lambda obj: obj
list_recurse = lambda obj: [self._flatten(v) for v in obj]
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
# else, what else? (methods, functions, old style classes...)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new
"""
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict
"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getstate = has_dict and hasattr(obj, '__getstate__')
has_getstate_support = has_getstate and hasattr(obj, '__setstate__')
HandlerClass = handlers.get(type(obj))
if has_class and not util.is_module(obj):
module, name = _getclassdetail(obj)
if self.unpicklable:
data[tags.OBJECT] = '%s.%s' % (module, name)
# Check for a custom handler
if HandlerClass:
handler = HandlerClass(self)
flat_obj = handler.flatten(obj, data)
self._mkref(flat_obj)
return flat_obj
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '%s/%s' % (obj.__name__,
obj.__name__)
else:
data = unicode(obj)
return data
if util.is_dictionary_subclass(obj):
return self._flatten_dict_obj(obj, data)
if has_dict:
# Support objects that subclasses list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
if has_getstate_support:
state = self._flatten(obj.__getstate__())
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_noncomplex(obj):
return [self._flatten(v) for v in obj]
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
def _flatten_dict_obj(self, obj, data=None):
| """Recursively call flatten() and return json-fri | endly dict
"""
if data is None:
data = obj.__class__()
flatten = self._flatten_key_value_pair
for k, v in sorted(obj.items(), key=operator.itemgetter(0)):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
flatten('default_factory', obj.default_factory, data)
return data
def _flatten_newstyle_with_slots(self, obj, |
MrSurly/micropython | tests/micropython/extreme_exc.py | Python | mit | 3,386 | 0.001181 | # test some extreme cases of allocating exceptions and tracebacks
import micropython
# Check for stackless build, which can't call functions without
# allocating a frame on the heap.
try:
def stackless():
pass
micropython.heap_lock()
stackless()
micropython.heap_unlock()
except RuntimeError:
print("SKIP")
raise SystemExit
# some ports need to allocate heap for the emergency exception
try:
micropython.alloc_emergency_exception_buf(256)
except AttributeError:
pass
def main():
# create an exception with many args while heap is locked
# should revert to empty tuple for args
micropython.heap_lock()
e = Exception(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
| 0,
)
micropython.heap_unlock()
print(repr(e))
| # create an exception with a long formatted error message while heap is locked
# should use emergency exception buffer and truncate the message
def f():
pass
micropython.heap_lock()
try:
f(
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=1
)
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e)[:10])
# create an exception with a long formatted error message while heap is low
# should use the heap and truncate the message
lst = []
while 1:
try:
lst = [lst]
except MemoryError:
break
try:
f(
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=1
)
except Exception as er:
e = er
lst[0][0] = None
lst = None
print(repr(e)[:10])
# raise a deep exception with the heap locked
# should use emergency exception and be unable to resize traceback array
def g():
g()
micropython.heap_lock()
try:
g()
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e)[:13])
# create an exception on the heap with some traceback on the heap, but then
# raise it with the heap locked so it can't allocate any more traceback
exc = Exception("my exception")
try:
raise exc
except:
pass
def h(e):
raise e
micropython.heap_lock()
try:
h(exc)
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e))
main()
|
Anislav/Stream-Framework | feedly/tests/managers/redis.py | Python | bsd-3-clause | 493 | 0 | from feedly.feed_managers.base import Feedly
from feedly.feeds.base import UserBaseFeed
from feedly.feeds.redis import RedisFeed
from feedly.tests.managers.base import | BaseFeedlyTest
import pytest
class RedisUserBaseFeed(UserBaseFeed, RedisFeed):
pass
class RedisFeedly(Feedly):
feed_classes = {
'feed': RedisFeed
}
user_feed_class = RedisUserBaseFeed
@pytest.mark.usefixtures("redis_reset")
class RedisFeedlyTest(BaseFeedlyTest) | :
manager_class = RedisFeedly
|
couchbaselabs/litmus | lib/pymongo/database.py | Python | apache-2.0 | 28,121 | 0.000071 | # Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database level operations."""
import warnings
from bson.binary import OLD_UUID_SUBTYPE
from bson.code import Code
from bson.dbref import DBRef
from bson.son import SON
from pymongo import common, helpers
from pymongo.collection import Collection
from pymongo.errors import (CollectionInvalid,
InvalidName,
OperationFailure)
from pymongo.son_manipulator import ObjectIdInjector
def _check_name(name):
"""Check if a database name is valid.
"""
if not name:
raise InvalidName("database name cannot be the empty string")
for invalid_char in [" ", ".", "$", "/", "\\"]:
if invalid_char in name:
raise InvalidName("database names cannot contain the "
"character %r" % invalid_char)
class Database(common.BaseObject):
"""A Mongo database.
"""
def __init__(self, connection, name):
"""Get a database by connection and name.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring`. Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
database name.
:Parameters:
- `connection`: a :class:`~pymongo.connection.Connection`
instance
- `name`: database name
.. mongodoc:: databases
"""
super(Database,
self).__init__(slave_okay=connection.slave_okay,
read_preference=connection.read_preference,
safe=connection.safe,
**(connection.get_lasterror_options()))
if not isinstance(name, basestring):
raise TypeError("name must be an instance of basestring")
_check_name(name)
self.__name = unicode(name)
self.__connection = connection
self.__incoming_manipulators = []
self.__incoming_copying_manipulators = []
self.__outgoing_manipulators = []
self.__outgoing_copying_manipulators = []
self.add_son_manipulator(ObjectIdInjector())
def add_son_manipulator(self, manipulator):
"""Add a new son manipulator to this database.
Newly added manipulators will be applied before existing ones.
:Parameters:
- `manipulator`: the manipulator to add
"""
def method_overwritten(instance, method):
return getattr(instance, method) != \
getattr(super(instance.__class__, instance), method)
if manipulator.will_copy():
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_copying_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_copying_manipulators.insert(0, manipulator)
else:
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_manipulators.insert(0, manipulator)
@property
def system_js(self):
"""A :class:`SystemJS` helper for this :class:`Database`.
See the documentation for :class:`SystemJS` for more details.
.. versionadded:: 1.5
"""
return SystemJS(self)
@property
def connection(self):
"""The :class:`~pymongo.connection.Connection` instance for this
:class:`Database`.
.. versionchanged:: 1.3
| ``connection`` is now a property rather than a method.
"""
return self.__connection
@property
def name(self):
"""The name of this :class:`Database`.
.. versionchanged:: 1.3
``name`` is now a property | rather than a method.
"""
return self.__name
@property
def incoming_manipulators(self):
"""List all incoming SON manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_manipulators]
@property
def incoming_copying_manipulators(self):
"""List all incoming SON copying manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_copying_manipulators]
@property
def outgoing_manipulators(self):
"""List all outgoing SON manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_manipulators]
@property
def outgoing_copying_manipulators(self):
"""List all outgoing SON copying manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_copying_manipulators]
def __cmp__(self, other):
if isinstance(other, Database):
return cmp((self.__connection, self.__name),
(other.__connection, other.__name))
return NotImplemented
def __repr__(self):
return "Database(%r, %r)" % (self.__connection, self.__name)
def __getattr__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return Collection(self, name)
def __getitem__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return self.__getattr__(name)
def create_collection(self, name, **kwargs):
"""Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this
method. Any of the following options are valid:
- "size": desired initial size for the collection (in
bytes). must be less than or equal to 10000000000. For
capped collections this size is the max size of the
collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
:Parameters:
- `name`: the name of the collection to create
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 2.1.1+
Removed deprecated argument: options
.. versionchanged:: 1.5
deprecating `options` in favor of kwargs
"""
opts = {"create": True}
opts.update(kwargs)
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
return Collection(self, name, **opts)
def _fix_incoming(self, son, collection):
"""Apply manipulators to an incoming SON object before it gets stored.
:Parameters:
- `son`: the |
camptocamp/QGIS | python/plugins/GdalTools/tools/doExtractProj.py | Python | gpl-2.0 | 7,191 | 0.038798 | # -*- coding: utf-8 -*-
"""
***************************************************************************
doExtractProj.py
---------------------
Date : August 2011
Copyright : (C) 2011 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2011'
__copyright__ = '(C) 2011, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_dialogExtractProjection import Ui_GdalToolsDialog as Ui_Dialog
import GdalTools_utils as Utils
import os.path
try:
from osgeo import gdal
from osgeo import osr
except ImportError, e:
error_str = e.args[ 0 ]
error_mod = error_str.replace( "No module named ", "" )
if req_mods.has_key( error_mod ):
error_str = error_str.replace( error_mod, req_mods[error_mod] )
raise ImportError( error_str )
class GdalToolsDialog( QDialog, Ui_Dialog ):
def __init__( self, iface ):
QDialog.__init__( self, iface.mainWindow() )
self.setupUi( self )
self.iface = iface
self.inSelector.setType( self.inSelector.FILE )
self.recurseCheck.hide()
self.okButton = self.buttonBox.button( QDialogButtonBox.Ok )
self.cancelButton = self.buttonBox.button( QDialogButtonBox.Cancel )
self.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFileEdit )
self.connect( self.batchCheck, SIGNAL( "stateChanged( int )" ), self.switchToolMode )
def switchToolMode( self ):
self.recurseCheck.setVisible( self.batchCheck.isChecked() )
self.inSelector.clear()
if self.batchCheck.isChecked():
self.inFileLabel = self.label.text()
self.label.setText( QCoreApplication.translate( "GdalTools", "&Input directory" ) )
QObject.disconnect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFileEdit )
QObject.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputDir )
else:
self.label.setText( self.inFileLabel )
QObject.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFileEdit )
QObject.disconnect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputDir )
def fillInputFileEdit( self ):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
inputFile = Utils.FileDialog.getOpenFileName( self, self.tr( "Select the file to analyse" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
if not inputFile:
return
Utils.FileFilter.setLastUsedRasterFilter( lastUsedFilter )
self.inSelector.setFilename( inputFile )
def fillInputDir( self ):
inputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the input directory with files to Assign projection" ))
if not inputDir:
return
self.inSelector.setFilename( inputDir )
def reject( self ):
QDialog.reject( self )
def accept( self ):
self.inFiles = None
if self.batchCheck.isChecked():
self.inFiles = Utils.getRasterFiles( self.inSelector.filename(), self.recurseCheck.isChecked() )
else:
self.inFiles = [ self.inSelector.filename() ]
self.progressBar.setRange( 0, len( self.inFiles ) )
QApplication.setOverrideCursor( QCursor( Qt.WaitCursor ) )
self.okButton.setEnabled( False )
self.extractor = ExtractThread( self.inFiles, self.prjCheck.isChecked() )
QObject.connect( self.extractor, SIGNAL( "fileProcessed()" ), self.updateProgress )
QObject.connect( self.extractor, SIGNAL( "processFinished()" ), self.processingFinished )
QObject.connect( self.extractor, SIGNAL( "processInterrupted()" ), self.processingInterrupted )
QObject.disconnect( self.buttonBox, SIGNAL( "rejected()" ), self.reject )
QObject.connect( self.buttonBox, SIGNAL( "rejected()" ), self.stopProcessing )
self.extractor.start()
def updateProgress( self ):
self.progressBar.setValue( self.progressBar.value() + 1 )
def processingFinished( self ):
self.stopProcessing()
def processingInterrupted( self ):
self.restoreGui()
def stopProcessing( self ):
if self.extractor != None:
self.extractor.stop()
self.extractor = None
self.restoreGui()
def restoreGui( self ):
self.progressBar.setRange( 0, 100 )
self.progressBar.setValue( 0 )
QApplication.restoreOverrideCursor()
QObject.disconnect( self.buttonBox, SIGNAL( "rejected()" ), self.stopProcessing )
QObject.conne | ct( self.buttonBox, SIGNAL( "rejected()" ), self.reject )
self.okButton.setEnabled( True )
# ----------------------------------------------------------------------
def extractProjection( filename, createPrj ):
raster = gdal.Open( unicode( filename ) )
crs = raster.GetPro | jection()
geotransform = raster.GetGeoTransform()
raster = None
outFileName = os.path.splitext( unicode( filename ) )[0]
# create prj file requested and if projection available
if crs != "" and createPrj:
# convert CRS into ESRI format
tmp = osr.SpatialReference()
tmp.ImportFromWkt( crs )
tmp.MorphToESRI()
crs = tmp.ExportToWkt()
tmp = None
prj = open( outFileName + '.prj', 'wt' )
prj.write( crs )
prj.close()
# create wld file
wld = open( outFileName + '.wld', 'wt')
wld.write( "%0.8f\n" % geotransform[1] )
wld.write( "%0.8f\n" % geotransform[4] )
wld.write( "%0.8f\n" % geotransform[2] )
wld.write( "%0.8f\n" % geotransform[5] )
wld.write( "%0.8f\n" % (geotransform[0] + 0.5 * geotransform[1] + 0.5 * geotransform[2] ) )
wld.write( "%0.8f\n" % (geotransform[3] + 0.5 * geotransform[4] + 0.5 * geotransform[5] ) )
wld.close()
class ExtractThread( QThread ):
def __init__( self, files, needPrj ):
QThread.__init__( self, QThread.currentThread() )
self.inFiles = files
self.needPrj = needPrj
self.mutex = QMutex()
self.stopMe = 0
def run( self ):
self.mutex.lock()
self.stopMe = 0
self.mutex.unlock()
interrupted = False
for f in self.inFiles:
extractProjection( f, self.needPrj )
self.emit( SIGNAL( "fileProcessed()" ) )
self.mutex.lock()
s = self.stopMe
self.mutex.unlock()
if s == 1:
interrupted = True
break
if not interrupted:
self.emit( SIGNAL( "processFinished()" ) )
else:
self.emit( SIGNAL( "processIterrupted()" ) )
def stop( self ):
self.mutex.lock()
self.stopMe = 1
self.mutex.unlock()
QThread.wait( self )
|
sprin/heroku-tut | worker/word_count.py | Python | mit | 1,663 | 0.007817 | from collections import defaultdict
import re
import sys
from stop_words import STOP_WORD_SET
from collections import Counter
PUNCTUATION_RE = re.compile("[%s]" % re.escape(
"""!"&()*+,-\.\/:;<=>?\[\\\]^`\{|\}~]+"""))
DISCARD_RE = re.compile("^('{|`|git@|@|https?:)")
def remove_stop_words(word_seq, stop_words):
"""Sanitize using intersection and list.remove()"""
return [w for w in word_seq if w and w not in stop_words]
def remove_punctuation(word_seq):
def remov | e_punc_inner(word):
return PUNCTUATION_RE.sub("", word)
removed = map(remove_punc_inner, word_seq)
# Remove emptry strings
return [w for | w in removed if w]
def filter_discards(word_seq):
def discard(word):
return not DISCARD_RE.match(word)
return filter(discard, word_seq)
def count_words_from_seq(word_seq):
word_count = defaultdict(int)
for word in word_seq:
word_count[word] += 1
return word_count
def keep_top_n_words(word_counts, n):
return dict(Counter(word_counts).most_common(n))
def count_words(text_blob):
word_seq = re.split('[=|\s]+', text_blob.lower())
print ' Splitting blob'
word_seq = filter_discards(word_seq)
print ' Filtering discards'
word_seq = remove_punctuation(word_seq)
print ' Removing punctuation'
word_seq = remove_stop_words(word_seq, STOP_WORD_SET)
print ' Removing stop words'
word_counts = count_words_from_seq(word_seq)
print ' Counting words'
top_n = keep_top_n_words(word_counts, 100)
print ' Filtering to top 100 words'
return top_n
if __name__ == '__main__':
print count_words(sys.stdin.read())
|
psycofdj/xtdpy | xtd/core/logger/__init__.py | Python | gpl-3.0 | 419 | 0.019093 | # -*- coding: utf-8
#------------------------------------------------------------------#
__author__ = "Xavier MARCELET <xavier@marcelet.com>"
#------------------------------------------------------------------#
import logging
from . import formatter, manager
from .tools import get, debug, info, warning, error, cri | tical, exception, log
#------------------------------------------------------------ | ------#
|
dstufft/warehouse | warehouse/migrations/versions/10cb17aea73_default_hosting_mode_to_pypi_only.py | Python | apache-2.0 | 1,098 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except i | n compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in | writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Default hosting mode to pypi-only
Revision ID: 10cb17aea73
Revises: 41abd35caa3
Create Date: 2015-09-03 01:18:55.288971
"""
from alembic import op
revision = "10cb17aea73"
down_revision = "41abd35caa3"
def upgrade():
    """Switch the server-side default of packages.hosting_mode to "pypi-only"."""
    defaults = dict(
        server_default="pypi-only",
        existing_server_default="pypi-explicit",
    )
    op.alter_column("packages", "hosting_mode", **defaults)
def downgrade():
    """Restore "pypi-explicit" as the server-side default of packages.hosting_mode."""
    defaults = dict(
        server_default="pypi-explicit",
        existing_server_default="pypi-only",
    )
    op.alter_column("packages", "hosting_mode", **defaults)
|
apache/incubator-airflow | tests/providers/google/cloud/operators/test_workflows.py | Python | apache-2.0 | 12,510 | 0.00032 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from unittest import mock
import pytz
from airflow.providers.google.cloud.operators.workflows import (
WorkflowsCancelExecutionOperator,
WorkflowsCreateExecutionOperator,
WorkflowsCreateWorkflowOperator,
WorkflowsDeleteWorkflowOperator,
WorkflowsGetExecutionOperator,
WorkflowsGetWorkflowOperator,
WorkflowsListExecutionsOperator,
WorkflowsListWorkflowsOperator,
WorkflowsUpdateWorkflowOperator,
)
BASE_PATH = "airflow.providers.google.cloud.operators.workflows.{}"
LOCATION = "europe-west1"
WORKFLOW_ID = "workflow_id"
EXECUTION_ID = "execution_id"
WORKFLOW = {"aa": "bb"}
EXECUTION = {"ccc": "ddd"}
PROJECT_ID = "airflow-testing"
METADATA = None
TIMEOUT = None
RETRY = None
FILTER_ = "aaaa"
ORDER_BY = "bbb"
UPDATE_MASK = "aaa,bbb"
GCP_CONN_ID = "test-conn"
IMPERSONATION_CHAIN = None
class TestWorkflowsCreateWorkflowOperator:
    """Unit tests for WorkflowsCreateWorkflowOperator."""

    @mock.patch(BASE_PATH.format("Workflow"))
    @mock.patch(BASE_PATH.format("WorkflowsHook"))
    def test_execute(self, mock_hook, mock_object):
        """execute() must create the workflow via the hook and return it as a dict."""
        op = WorkflowsCreateWorkflowOperator(
            task_id="test_task",
            workflow=WORKFLOW,
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        result = op.execute({})
        # The hook must be built with the operator's connection settings.
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        # All operator arguments must be forwarded verbatim to the API call.
        mock_hook.return_value.create_workflow.assert_called_once_with(
            workflow=WORKFLOW,
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        # The operator returns Workflow.to_dict(...) of the created workflow.
        assert result == mock_object.to_dict.return_value
class TestWorkflowsUpdateWorkflowOperator:
    """Unit tests for WorkflowsUpdateWorkflowOperator."""

    @mock.patch(BASE_PATH.format("Workflow"))
    @mock.patch(BASE_PATH.format("WorkflowsHook"))
    def test_execute(self, mock_hook, mock_object):
        """execute() must fetch the current workflow, then update it with the mask."""
        op = WorkflowsUpdateWorkflowOperator(
            task_id="test_task",
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            update_mask=UPDATE_MASK,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        result = op.execute({})
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        # The operator first reads the existing workflow ...
        mock_hook.return_value.get_workflow.assert_called_once_with(
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        # ... then submits that same object back with the update mask.
        mock_hook.return_value.update_workflow.assert_called_once_with(
            workflow=mock_hook.return_value.get_workflow.return_value,
            update_mask=UPDATE_MASK,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        assert result == mock_object.to_dict.return_value
class TestWorkflowsDeleteWorkflowOperator:
    """Unit tests for WorkflowsDeleteWorkflowOperator."""

    @mock.patch(BASE_PATH.format("WorkflowsHook"))
    def test_execute(
        self,
        mock_hook,
    ):
        """execute() must delete the workflow via the hook; there is no return value."""
        op = WorkflowsDeleteWorkflowOperator(
            task_id="test_task",
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        op.execute({})
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        mock_hook.return_value.delete_workflow.assert_called_once_with(
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
class TestWorkflowsListWorkflowsOperator:
    """Unit tests for WorkflowsListWorkflowsOperator."""

    @mock.patch(BASE_PATH.format("Workflow"))
    @mock.patch(BASE_PATH.format("WorkflowsHook"))
    def test_execute(self, mock_hook, mock_object):
        """execute() must list workflows and return them converted to dicts."""
        # NOTE(review): start_time is set here but does not appear to be used
        # by the assertions below — presumably relevant to sensor-side
        # filtering; confirm against the operator implementation.
        workflow_mock = mock.MagicMock()
        workflow_mock.start_time = datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(minutes=5)
        mock_hook.return_value.list_workflows.return_value = [workflow_mock]
        op = WorkflowsListWorkflowsOperator(
            task_id="test_task",
            location=LOCATION,
            project_id=PROJECT_ID,
            filter_=FILTER_,
            order_by=ORDER_BY,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        result = op.execute({})
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        mock_hook.return_value.list_workflows.assert_called_once_with(
            location=LOCATION,
            project_id=PROJECT_ID,
            filter_=FILTER_,
            order_by=ORDER_BY,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        # One input workflow -> one serialized dict in the result list.
        assert result == [mock_object.to_dict.return_value]
class TestWorkflowsGetWorkflowOperator:
    """Unit tests for WorkflowsGetWorkflowOperator.

    (Repaired a corrupted ``gcp_conn_id=GCP_CONN_ID,`` line in the operator
    construction; every sibling test class passes the same pair of
    connection kwargs.)
    """

    @mock.patch(BASE_PATH.format("Workflow"))
    @mock.patch(BASE_PATH.format("WorkflowsHook"))
    def test_execute(self, mock_hook, mock_object):
        """execute() must fetch the workflow via the hook and return it as a dict."""
        op = WorkflowsGetWorkflowOperator(
            task_id="test_task",
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        result = op.execute({})
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        mock_hook.return_value.get_workflow.assert_called_once_with(
            workflow_id=WORKFLOW_ID,
            location=LOCATION,
            project_id=PROJECT_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        assert result == mock_object.to_dict.return_value
class TestWorkflowExecutionsCreateExecutionOperator:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
@mock.patch(BASE_PATH.format("WorkflowsCreateExecutionOperator.xcom_push"))
def test_execute(self, mock_xcom, mock_hook, mock_object):
mock_hook.return_value.create_execution.return_value.name = "name/execution_id"
op = WorkflowsCreateExecutionOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
execution=EXECUTION,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
|
DlutCS/nserver_py | views/api/news.py | Python | unlicense | 2,540 | 0.007899 | # -*- coding: utf-8 -*-
from views.api import api, restful, error
from flask import request, render_template
from utils.consts import *
from models.news import News
from models.category import Category
@restful('/category/')
def get_categorys():
    """List every category together with the total number of categories."""
    categorys = Category.get_all()
    return {
        'categories': categorys,
        'total': len(categorys),
    }
@restful('/news/<id>/')
def get_news(id):
    """Fetch a single news item by numeric id, or by alias for non-numeric ids.

    Returns error code 10003 when no matching item exists.
    """
    # 'id' shadows the builtin, but the name must match the <id> URL variable.
    if id.isdigit():
        news = News.get(id=id)
    else:
        news = News.get_by_alias(alias=id)
    if not news:
        return error(10003, 'news id not found')
    return news
@restful('/newslist/')
@restful('/newslist/latest/')
def news_latest():
    """Latest news, newest first, with optional ?start/?limit paging.

    When ?template is truthy, return a rendered HTML fragment instead of
    the raw news list.
    """
    start = request.args.get('start', 0)
    limit = request.args.get('limit', PAGE_LIMIT)
    template = request.args.get('template', False)
    rs = News.get_all(order='create_time desc', start=int(start), limit=int(limit))
    data = {'count': len(rs)}
    if template:
        data['template'] = render_template('component/news_loop.html', data=rs)
    else:
        data['newslist'] = rs
    return data
@restful('/newslist/popular/')
def news_popular():
    """Most-read news first, with optional ?start/?limit paging."""
    start = request.args.get('start', 0)
    limit = request.args.get('limit', PAGE_LIMIT)
    rs = News.get_all(order='read_count desc', start=int(start), limit=int(limit))
    return {'count': len(rs), 'newslist': rs}
@restful('/newslist/category/<int:cid>/')
@restful('/newslist/category/<int:cid>/latest/')
def news_by_category_latest(cid):
    """Latest news for category *cid*, with optional ?start/?limit paging.

    Category 1 is the front-page/top-stories feed and is served from the
    unfiltered news list; ?template switches the response to rendered HTML.
    """
    data = {}
    start = request.args.get('start', 0)
    limit = request.args.get('limit', PAGE_LIMIT)
    template = request.args.get('template', False)
    if cid == 1:  # top-stories (front page): not filtered by category
        rs = News.get_all(order='create_time desc', start=int(start), limit=int(limit))
    else:
        rs = News.get_by_category(cid=cid, order='create_time desc', start=int(start), limit=int(limit))
    data['count'] = len(rs)
    if template:
        data['template'] = render_template('component/news_loop.html', data=rs)
    else:
        data['newslist'] = rs
    return data
@restful('/newslist/category/<int:cid>/popular/')
def news_by_category_popular(cid):
    """Most-read news for category *cid*, with optional ?start/?limit paging."""
    # NOTE: the duplicate '/newslist/category/<int:cid>/' rule was removed:
    # news_by_category_latest already registers that rule (and, being
    # registered first, wins the match), so the copy here was dead.
    start = request.args.get('start', 0)
    limit = request.args.get('limit', PAGE_LIMIT)
    rs = News.get_by_category(cid=cid, order='read_count desc', start=int(start), limit=int(limit))
    return {'count': len(rs), 'newslist': rs}
|
Jackevansevo/basic-utils | tests/test_core.py | Python | mit | 2,606 | 0 | from unittest.mock import MagicMock, Mock, mock_open, patch
import pytest # type: ignore
from basic | _utils.core import (
clear,
getattrs,
map_getattr,
rgetattr,
rsetattr,
slurp,
to_string
)
def test_slurp() -> None:
    """slurp() reads a file and returns its entire contents as a string."""
    data = "In the face of ambiguity, refuse the temptation to guess."
    # Patch builtins.open so no real file is touched.
    with patch("builtins.open", mock_open(read_data=data)) as mock_file:
        file_contents = slurp('text.txt')
        # slurp must open the given path read-only, exactly once.
        mock_file.assert_called_once_with('text.txt', 'r')
        assert file_contents == data
# One case per supported platform: POSIX uses 'clear', Windows uses 'cls'.
@pytest.mark.parametrize("platform, expected", [
    ('posix', 'clear'),
    ('nt', 'cls')
])
def test_clear(platform: str, expected: str) -> None:
    """
    Tests that os.system is called with the correct string corresponding to
    the host OS name
    """
    # Patch the os.name value *as imported into basic_utils.core*, then
    # intercept os.system so no real terminal command runs.
    with patch('basic_utils.core.name', platform):
        with patch('basic_utils.core.system') as mock_system:
            clear()
            mock_system.assert_called_once_with(expected)
def test_to_string() -> None:
    """to_string() joins the str() of every element with ', '."""
    first = Mock()
    second = Mock()
    first.__str__ = Mock(return_value='Homer')  # type: ignore
    second.__str__ = Mock(return_value='Bart')  # type: ignore
    assert to_string([first, second]) == "Homer, Bart"
    assert to_string([1, 2, 3]) == "1, 2, 3"
def test_getattrs() -> None:
    """getattrs() pulls several attributes from one object as a tuple."""
    homer = Mock(forename='Homer', age=39)  # type: ignore
    assert getattrs(homer, ('forename', 'age')) == ('Homer', 39)
def test_map_getattr() -> None:
    """map_getattr() reads one attribute across many objects."""
    homer = Mock(forename='Homer')  # type: ignore
    bart = Mock(forename='Bart')  # type: ignore
    assert map_getattr('forename', (homer, bart)) == ('Homer', 'Bart')
class TestRecursiveGettersAndSetters:
    """Tests for rgetattr/rsetattr dotted-path attribute access."""

    @classmethod
    def setup_class(cls) -> None:
        # Shared fixture: homer.child.forename == 'Bart'
        cls.child = MagicMock(forename='Bart') # type: ignore
        cls.homer = MagicMock(child=cls.child) # type: ignore
    def test_rgetattr(self) -> None:
        """
        Tests that rgetattr returns nested values within objects
        """
        assert rgetattr(self.homer, 'child.forename') == 'Bart' # type: ignore
    def test_rsetattr(self) -> None:
        """
        Tests that rsetattr sets the value of a nested attribute
        """
        rsetattr(self.homer, 'child.name', 'Lisa') # type: ignore
        # The write must land on the nested child object itself.
        assert self.child.name == 'Lisa' # type: ignore
|
JOUR491-NewsApplications/JOUR491-FoodOnCampus | food/food/urls.py | Python | mit | 314 | 0.006369 | from django.conf.urls import patterns, include, url
from django.contrib im | port admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'menu.views.home', name='home'),
url(r'^hall/(?P<slug>[-\w]+)/$', 'menu.views.halldetail', name='halldetail'),
url(r | '^admin/', include(admin.site.urls)),
)
|
robin1885/algorithms-exercises-using-python | source-code-from-author-book/Listings-for-Second-Edition/listing_1_10.py | Python | mit | 244 | 0.004098 | class LogicGate | :
def __init__(self,n):
self.label = n
self.output = None
def getLabel(self):
re | turn self.label
def getOutput(self):
self.output = self.performGateLogic()
return self.output
|
timothypage/etor | etor/samples/admin.py | Python | mit | 182 | 0.005495 | from sample | s.models import Insurance, Patient, Specimen
from django.contrib import admin
admin.site.register(Insurance)
admin.site.register(Patient)
admin.site.register(Sp | ecimen)
|
Nanolx/bashstyle-ng | ui/args.py | Python | gpl-3.0 | 3,457 | 0.000579 | # coding=utf-8
# ##################################################### #
# #
# This is BashStyle-NG #
# #
# Licensed under GNU GENERAL PUBLIC LICENSE v3 #
# #
# Copyright 2007 - 2020 Christopher Bratusek #
# #
# ##################################################### #
# Modules required at runtime; imported dynamically so failures can be
# collected and reported in one message instead of a bare traceback.
MODULES = ['os', 'sys', 'optparse']
FAILED = []
for module in MODULES:
    try:
        globals()[module] = __import__(module)
    except ImportError:
        FAILED.append(module)
if FAILED:
    # NOTE(review): _() (gettext) is presumably installed by the launcher
    # before this module is imported — confirm.  Also, if 'sys' itself were
    # among the failures, sys.exit below would NameError.
    print(
        _("The following modules failed to import: %s")
        % (" ".join(FAILED))
    )
    sys.exit(1)
class CmdArgs(object):
    """Command-line interface for the 'bashstyle' launcher.

    The class body runs at import time: it builds the optparse parser,
    parses sys.argv, and acts on the simple informational flags directly.
    """
    parser = optparse.OptionParser(
        _("\n bashstyle <option>\n\n\
BashStyle-NG © 2007 - 2020 Christopher Bratusek\n\
Licensed under the GNU GENERAL PUBLIC LICENSE v3")
    )
    parser.add_option(
        "-v", "--version", dest="version", action="store_true",
        default=False, help=_("print version and exit")
    )
parser.add_option(
"-p", "--prefix", dest="prefix", action="store_true",
default=False, help=_("print instal | lation prefix and ex | it")
)
    parser.add_option(
        "-P", "--python", dest="python", action="store_true",
        default=False, help=_("print used Python interpreter; \
if additional args are given they will be passed to the used Python \
interpreter.")
    )
    parser.add_option(
        "-d", "--doc", dest="doc", action="store_true",
        default=False, help=_("open HTML documentation and exit")
    )
    parser.add_option(
        "-l", "--log", dest="log", action="store_true",
        default=False, help=_("view BashStyle-NG log file")
    )
    parser.add_option(
        "-u", "--update", dest="update", action="store_true",
        default=False, help=_("update user configuration and exit")
    )
    parser.add_option(
        "-b", "--backup", dest="backup", action="store_true",
        default=False, help=_("backup user configuration and exit")
    )
    parser.add_option(
        "-r", "--restore", dest="restore", action="store_true",
        default=False, help=_("restore user configuration backup and exit")
    )
    parser.add_option(
        "-R", "--reset", dest="reset", action="store_true",
        default=False, help=_("reset user configuration and exit")
    )
    # NOTE(review): dest="ini-get"/"ini-set" contain hyphens, so these values
    # are not reachable as options.ini_get attributes — only via
    # getattr(options, 'ini-get'); confirm the dispatch code accounts for it.
    parser.add_option(
        "-i", "--ini-get", dest="ini-get", action="store_true",
        default=False, help=_("get the value of the specified setting")
    )
    parser.add_option(
        "-I", "--ini-set", dest="ini-set", action="store_true",
        default=False, help=_("set the value of the specified setting")
    )
    parser.add_option(
        "-E", "--enable", dest="enable", action="store_true",
        default=False, help=_("enable BashStyle-NG")
    )
    parser.add_option(
        "-D", "--disable", dest="disable", action="store_true",
        default=False, help=_("disable BashStyle-NG")
    )
    # Parse sys.argv immediately (at class-creation time) and handle the
    # purely-informational flags inline.
    (options, args) = parser.parse_args()
    if options.version:
        print("%s (%s)" %(os.getenv('BSNG_VERSION'), os.getenv('BSNG_CODENAME')))
        sys.exit(0)
    if options.prefix:
        print("%s" % os.getenv('BSNG_PREFIX'))
        sys.exit(0)
|
mccorkle/seds-utils | Sbs.py | Python | gpl-3.0 | 13,196 | 0.003107 | class Sbs:
    def __init__(self, sbsFilename, sbc_filename, newSbsFilename):
        """Load the sector save (.sbs XML) and its checkpoint (.sbc) companion."""
        import xml.etree.ElementTree as ET
        import Sbc
        self.mySbc = Sbc.Sbc(sbc_filename)    # checkpoint wrapper (player data)
        self.sbsTree = ET.parse(sbsFilename)  # parsed sector XML tree
        self.sbsRoot = self.sbsTree.getroot()
        # Fully-qualified xsi:type attribute name used throughout the save file.
        self.XSI_TYPE = "{http://www.w3.org/2001/XMLSchema-instance}type"
        self.newSbsFilename = newSbsFilename  # where the edited save is written
def findPlayerBySteamID(self, steam_id):
if (steam_id == 0):
return False
print("looking for player with steamID of %s" % steam_id)
ourPlayerDict = self.mySbc.getPlayerDict()
for player in ourPlayerDict:
# print playerDict[player]['steamID']
if ourPlayerDict[player]['steamID'] == steam_id:
return ourPlayerDict[player]
# if we don't find the user
return False
    def giveReward(self, rewardOwner, rewardType, rewardAmount):
        """
        Hunt down the first cargo container owned by *rewardOwner* (their
        in-game entity id) whose CustomName contains "LOOT", and append
        *rewardAmount* units of ore *rewardType* to its inventory.
        """
        import xml.etree.ElementTree as ET
        print("trying to give %s %s units of %s" % (rewardOwner, rewardAmount, rewardType))
        for sectorObjects in self.sbsRoot.iter('SectorObjects'):
            for entityBase in sectorObjects.iter('MyObjectBuilder_EntityBase'):
                # EntityId = entityBase.find('EntityId')
                # print ("checking entityID %s" % EntityId.text)
                gridSize = entityBase.find('GridSizeEnum')
                # TODO+: some kind of warning if we have a reward to give, but can't find this user's LOOT container
                # Only grid entities (ships/stations) carry GridSizeEnum.
                if hasattr(gridSize, 'text'):
                    cubeBlocks = entityBase.find('CubeBlocks')
                    for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
                        owner = myCubeBlock.find("Owner")
                        EntityId = myCubeBlock.find('EntityId')
                        customName = myCubeBlock.find('CustomName')
                        # Match: owned by the target player, is a cargo
                        # container, and has a CustomName at all.
                        if hasattr(owner, 'text') and owner.text == rewardOwner and myCubeBlock.get(self.XSI_TYPE) == "MyObjectBuilder_CargoContainer" and hasattr(customName, 'text'):
                            if "LOOT" in customName.text:
                                print("I found a cargo container owned by %s with entityID of %s and name of %s" % (owner.text, EntityId.text, customName.text))
                                componentContainer = myCubeBlock.find('ComponentContainer')
                                components = componentContainer.find('Components')
                                componentData = components.find('ComponentData')
                                component = componentData.find('Component')
                                items = component.find('Items')
                                # Count existing inventory items: the count is
                                # used as the new item's ItemId.
                                itemCount = 0
                                for myInventoryItems in items.iter('MyObjectBuilder_InventoryItem'):
                                    itemCount += 1
                                print("planning to add %s of %s into it as item %s" % (rewardAmount, rewardType, itemCount))
                                # Shape of the element being appended:
                                # <MyObjectBuilder_InventoryItem>
                                #     <Amount>200</Amount>
                                #     <PhysicalContent xsi:type="MyObjectBuilder_Ore">
                                #         <SubtypeName>Uranium</SubtypeName>      ## from rewardType
                                #     </PhysicalContent>
                                #     <ItemId>4</ItemId>                          ## from itemCount
                                #     <AmountDecimal>200</AmountDecimal>          ## from rewardAmount
                                # </MyObjectBuilder_InventoryItem>
                                # myCubeBlock.append((ET.fromstring('<MyObjectBuilder_InventoryItem><Amount>123456789</Amount></MyObjectBuilder_InventoryItem>')))
                                inventoryItem = ET.SubElement(items, 'MyObjectBuilder_InventoryItem')
                                amount = ET.SubElement(inventoryItem, 'Amount')
                                amount.text = str(rewardAmount)
                                physicalContent = ET.SubElement(inventoryItem, 'PhysicalContent')
                                physicalContent.set(self.XSI_TYPE, 'MyObjectBuilder_Ore')
                                subtypeName = ET.SubElement(physicalContent, 'SubtypeName')
                                subtypeName.text = rewardType
                                itemId = ET.SubElement(inventoryItem, 'ItemId')
                                itemId.text = str(itemCount)
                                amountDecimal = ET.SubElement(inventoryItem, 'AmountDecimal')
                                amountDecimal.text = str(rewardAmount)
                                nextItemId = component.find('nextItemId')
                                nextItemId.text = str(itemCount + 1)
    # FIXME: this makes a mess of the html, figure out a way to clean it up?
def removeFloaters(self):
import xml.etree.ElementTree as ET
removedCount = 0
warnCount = 0
for sectorObjects in self.sbsRoot.iter('SectorObjects'):
for entityBase in sectorObjects.iter('MyObjectBuilder_EntityBase'):
cubeGridID = entityBase.find('EntityId')
gridSizeEnum = entityBase.find('GridSizeEnum')
objectType = entityBase.get(self.XSI_TYPE)
isStatic = entityBase.find('IsStatic') # FIXME: this does not do what I thought it did. Tested with simple station, and it isn't set as static when I build it from scratch.
# TODO: only way I can see to easily fix is check for <Forward x="-0" y="-0" z="-1" /> for static things
# print cubeGridID.text if hasattr(cubeGridID, 'text') else 'not defined'
if hasattr(cubeGridID, 'text'):
print("Grid EntityID: %s " % cubeGridID.text)
else:
print("FIXME: no gridID")
# print ("\t is objectType %s" % objectType )
if hasattr(isStatic, 'text'):
# this is a base, all of our checks are null and void. Bases don't float or cost me CPU
print("\t skipping trash checks because this IsStatic")
continue
if hasattr(gridSizeEnum, 'text'):
# is a grid, small or large
gridName = entityBase.find('DisplayName').text
print("\t is a grid size %s %s" % (gridSizeEnum.text, gridName))
# if the name contains DEL.WRN
if "[DEL.WRN]" in gridName:
print("\t ALREADY HAD DEL.WRN in the NAME, GOODBYE")
sectorObjects.remove(entityBase)
removedCount += 1
else:
# it doesn't have a DEL WRN yet, lets check for our | rules
# TODO: look through the whole entityBase for 6 thrusters, a power supply, and at least one block not owned by pirates
thrusterCount = 0
powerSource = 0
controlSurface = 0
gyroCount = 0
| turretCount = 0
ownerCount = 0
ownedThings = 0
ownerList = []
cubeBlocks = entityBase.find('CubeBlocks')
for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
owner = myCubeBlock.find("Owner")
# subtype = myCubeBlock.find('SubtypeName')
cubeType = myCubeBlock.get(self.XSI_TYPE)
entityID = myCubeBlock.find("EntityId")
# print ("\t\tTODO: cubeType of: %s" % cubeType)
if "Thrust" in cubeType:
thruste |
proyectos-analizo-info/pybossa-analizo-info | pybossa/sched.py | Python | agpl-3.0 | 7,472 | 0.00348 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, | see <http://www.gnu.org/licenses/>.
#import json
#from flask import Blueprint, request, url_for, flash, redirect, abort
#from flask import abort, request, make_response, current_app
from sqlalchemy.sql import text
import pybossa.model as model
from pybossa.core import db
import random
def new_task(app_id, user_id=None, user_ip=None, offset=0):
    '''Get a new task by calling the appropriate scheduler function.
    '''
    app = db.session.query(model.app.App).get(app_id)
    # Anonymous users get a pseudo-task carrying an error message when the
    # project forbids anonymous contributions.
    if not app.allow_anonymous_contributors and user_id is None:
        error = model.task.Task(info=dict(error="This project does not allow anonymous contributors"))
        return error
    else:
        # Dispatch on the project's configured scheduler name; unknown or
        # missing values fall back to depth-first.
        sched_map = {
            'default': get_depth_first_task,
            'breadth_first': get_breadth_first_task,
            'depth_first': get_depth_first_task,
            'random': get_random_task,
            'incremental': get_incremental_task}
        sched = sched_map.get(app.info.get('sched'), sched_map['default'])
        return sched(app_id, user_id, user_ip, offset=offset)
def get_breadth_first_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Gets a new task which have the least number of task runs (excluding the
    current user).
    Note that it **ignores** the number of answers limit for efficiency reasons
    (this is not a big issue as all it means is that you may end up with some
    tasks run more than is strictly needed!)
    """
    # Uncomment the next three lines to profile the sched function
    #import timeit
    #T = timeit.Timer(lambda: get_candidate_tasks(app_id, user_id,
    #                                             user_ip, n_answers))
    #print "First algorithm: %s" % T.timeit(number=1)
    # Authenticated users are matched by user_id; everyone else by IP.
    if user_id and not user_ip:
        sql = text('''
        SELECT task.id, COUNT(task_run.task_id) AS taskcount FROM task
        LEFT JOIN task_run ON (task.id = task_run.task_id) WHERE NOT EXISTS
        (SELECT 1 FROM task_run WHERE app_id=:app_id AND
        user_id=:user_id AND task_id=task.id)
        AND task.app_id=:app_id AND task.state !='completed'
        group by task.id ORDER BY taskcount, id ASC LIMIT 10;
        ''')
        tasks = db.engine.execute(sql, app_id=app_id, user_id=user_id)
    else:
        if not user_ip: # pragma: no cover
            user_ip = '127.0.0.1'
        sql = text('''
        SELECT task.id, COUNT(task_run.task_id) AS taskcount FROM task
        LEFT JOIN task_run ON (task.id = task_run.task_id) WHERE NOT EXISTS
        (SELECT 1 FROM task_run WHERE app_id=:app_id AND
        user_ip=:user_ip AND task_id=task.id)
        AND task.app_id=:app_id AND task.state !='completed'
        group by task.id ORDER BY taskcount, id ASC LIMIT 10;
        ''')
        # results will be list of (taskid, count)
        tasks = db.engine.execute(sql, app_id=app_id, user_ip=user_ip)
    # ignore n_answers for the present - we will just keep going once we've
    # done as many as we need
    tasks = [x[0] for x in tasks]
    if tasks:
        if (offset == 0):
            return db.session.query(model.task.Task).get(tasks[0])
        else:
            # Offsets beyond the 10-row SQL LIMIT always yield None.
            if (offset < len(tasks)):
                return db.session.query(model.task.Task).get(tasks[offset])
            else:
                return None
    else: # pragma: no cover
        return None
def get_depth_first_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Get a new task for a given project, depth-first.

    Returns the candidate task at position *offset* (candidates are ordered
    by priority, then id), or None when no task exists at that offset.
    """
    candidate_tasks = get_candidate_tasks(app_id, user_id, user_ip, n_answers,
                                          offset=offset)
    # A single bounds check replaces the original's redundant
    # "empty list / offset == 0 / offset < len" branching; behavior is
    # identical for all non-negative offsets.
    if offset < len(candidate_tasks):
        return candidate_tasks[offset]
    return None
def get_random_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Return a random task of the project, or None if it has no tasks."""
    app = db.session.query(model.app.App).get(app_id)
    # 'random' is already imported at module level; the original's local
    # 'from random import choice' was redundant.
    if app.tasks:
        return random.choice(app.tasks)
    return None
def get_incremental_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Get a new task for a given project with its last given answer.
    It is an important strategy when dealing with large tasks, as
    transcriptions.
    """
    # NOTE(review): the offset parameter is accepted but a hard-coded
    # offset=0 is passed through here — confirm whether that is intended.
    candidate_tasks = get_candidate_tasks(app_id, user_id, user_ip, n_answers, offset=0)
    total_remaining = len(candidate_tasks)
    if total_remaining == 0:
        return None
    # Pick a random candidate rather than the first one.
    rand = random.randrange(0, total_remaining)
    task = candidate_tasks[rand]
    #Find last answer for the task
    q = db.session.query(model.task_run.TaskRun)\
        .filter(model.task_run.TaskRun.task_id == task.id)\
        .order_by(model.task_run.TaskRun.finish_time.desc())
    last_task_run = q.first()
    if last_task_run:
        task.info['last_answer'] = last_task_run.info
    #TODO: As discussed in GitHub #53
    # it is necessary to create a lock in the task!
    return task
def get_candidate_tasks(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Gets all available tasks for a given project and user.

    Tasks already answered by this user (by id, else by IP) and completed
    tasks are excluded; at most 10 candidates are returned, ordered by
    priority then id.
    """
    rows = None
    if user_id and not user_ip:
        query = text('''
        SELECT id FROM task WHERE NOT EXISTS
        (SELECT task_id FROM task_run WHERE
        app_id=:app_id AND user_id=:user_id AND task_id=task.id)
        AND app_id=:app_id AND state !='completed'
        ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = db.engine.execute(query, app_id=app_id, user_id=user_id)
    else:
        if not user_ip:
            user_ip = '127.0.0.1'
        query = text('''
        SELECT id FROM task WHERE NOT EXISTS
        (SELECT task_id FROM task_run WHERE
        app_id=:app_id AND user_ip=:user_ip AND task_id=task.id)
        AND app_id=:app_id AND state !='completed'
        ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = db.engine.execute(query, app_id=app_id, user_ip=user_ip)
    # Materialize each row id into a full ORM Task object.
    tasks = []
    for t in rows:
        tasks.append(db.session.query(model.task.Task).get(t.id))
    return tasks
|
SHA2017-badge/micropython-esp32 | tests/jni/system_out.py | Python | mit | 144 | 0.006944 | try:
import jni
System = jni.cls("java/lang/System")
except:
prin | t("SKIP")
raise Sys | temExit
System.out.println("Hello, Java!")
|
yrchen/CommonRepo | config/urls.py | Python | apache-2.0 | 7,139 | 0.003502 | # -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
"""
URLs for Common Repository project.
http://www.commonrepo.com/
"""
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from filebrowser.sites import site
import nexus
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework.urlpatterns import format_suffix_patterns
from commonrepo.common.routers import DefaultRouter
from commonrepo.elos_api.views import ELOViewSet, ELOViewSetV2, ELOTypeViewSet, ELOFileUploadViewSet
from commonrepo.elos_api.views import ELODiversity, ELODiversityAll, ELOSimilarity, ELOSimilarityAll, ELOFork
from commonrepo.infor_api.views import InforELOTotalCount, InforUsersTotalCount
from commonrepo.groups_api.views import GroupViewSet, GroupViewSetV2
from commonrepo.groups_api.views import GroupsMemberAbort, GroupsMemberJoin
from commonrepo.main import views as MainViews
from commonrepo.snippets_api.views import SnippetViewSet
from commonrepo.users_api.views import UserViewSet, UserViewSetV2
#
# API v1
#
#
# API v1
#
# One router collects every v1 viewset; each is mounted under its own prefix.
router_api_v1 = DefaultRouter()
router_api_v1.register(r'api/v1/elos', ELOViewSet)
router_api_v1.register(r'api/v1/elotypes', ELOTypeViewSet)
router_api_v1.register(r'api/v1/groups', GroupViewSet)
router_api_v1.register(r'api/v1/snippets', SnippetViewSet)
router_api_v1.register(r'api/v1/users', UserViewSet)
#
# API v2
#
# The v2 router only re-exposes viewsets with a v2 revision; v1-only
# endpoints (elotypes, snippets) stay on the v1 router.
router_api_v2 = DefaultRouter()
router_api_v2.register(r'api/v2/elos', ELOViewSetV2)
router_api_v2.register(r'api/v2/groups', GroupViewSetV2)
router_api_v2.register(r'api/v2/users', UserViewSetV2)
urlpatterns = [
# Misc
url(r'^robots\.txt$', include('robots.urls')),
# Static Templates
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
ur | l(r'^download/$', TemplateView.as_view(template_name='pages/download.html'), name="download"),
url(r'^dashboard/$', MainViews.DashboardView.as_view(), name="dashboard"),
# Django Admin
url(r'^admin/filebrowser/', include(site.urls)), # django-filebrowser
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLs
url(r'^nexus/', include(nexus.site.urls)), # Nexus URLs
url(r'^admin/', includ | e(admin.site.urls)),
# User management
url(r'^users/', include("commonrepo.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
url(r'^avatar/', include('avatar.urls')),
# Message
url(r'^messages/', include('messages_extends.urls')), # django-messages-extends
# Notifications
url(r'^notifications/', include('notifications.urls', namespace='notifications')),
# Activity
url('^activity/', include('actstream.urls')), # django-activity-stream
# Comments
url(r'^comments/', include('fluent_comments.urls')), # django-contrib-comments
# Documents
url(r'^api/docs/', include('rest_framework_swagger.urls')), # django-rest-swagger
# Search
url(r'^search/', include('haystack.urls'), name="search"), # django-haystack
# Your stuff: custom urls includes go here
url(r'^elos/', include("commonrepo.elos.urls", namespace="elos")),
url(r'^groups/', include("commonrepo.groups.urls", namespace="groups")),
# Django REST Framework (DRF) Authenticaion
url(r'^api/drf/auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),
#
# API v1
#
# API v1 - Endpoints
url(r'', include(router_api_v1.urls)),
# API v1 - Authenticaion
url(r'^api/v1/auth/', include('djoser.urls.authtoken')),
# API v1 - ELOs
url(r'^api/v1/elos-upload', ELOFileUploadViewSet),
url(r'^api/v1/elos/diversity/(?P<pk>[0-9]+)/(?P<pk2>[0-9]+)/$', 'commonrepo.elos_api.views.elos_diversity'),
url(r'^api/v1/elos/diversity/(?P<pk>[0-9]+)/all/$', 'commonrepo.elos_api.views.elos_diversity_all'),
url(r'^api/v1/elos/similarity/(?P<pk>[0-9]+)/(?P<pk2>[0-9]+)/$', 'commonrepo.elos_api.views.elos_similarity'),
url(r'^api/v1/elos/similarity/(?P<pk>[0-9]+)/all/$', 'commonrepo.elos_api.views.elos_similarity_all'),
url(r'^api/v1/elos/fork/(?P<pk>[0-9]+)/$', 'commonrepo.elos_api.views.elos_fork'),
# API v1 - Information
url(r'^api/v1/infor/elos-total/$', 'commonrepo.infor_api.views.elos_total_count'),
url(r'^api/v1/infor/users-total/$', 'commonrepo.infor_api.views.users_total_count'),
# API v1 - Groups
url(r'^api/v1/groups/abort/(?P<pk>[0-9]+)/$', 'commonrepo.groups_api.views.groups_member_abort'),
url(r'^api/v1/groups/join/(?P<pk>[0-9]+)/$', 'commonrepo.groups_api.views.groups_member_join'),
#
# API v2
#
# API v2 - Endpoints
url(r'', include(router_api_v2.urls)),
# API v2 - Authenticaion
url(r'^api/v2/auth/', include('djoser.urls.authtoken')),
# API v2 - ELOs
url(r'^api/v2/elos-upload', ELOFileUploadViewSet),
url(r'^api/v2/elos/diversity/(?P<pk>[0-9]+)/(?P<pk2>[0-9]+)/$', ELODiversity.as_view()),
url(r'^api/v2/elos/diversity/(?P<pk>[0-9]+)/all/$', ELODiversityAll.as_view()),
url(r'^api/v2/elos/similarity/(?P<pk>[0-9]+)/(?P<pk2>[0-9]+)/$', ELOSimilarity.as_view()),
url(r'^api/v2/elos/similarity/(?P<pk>[0-9]+)/all/$', ELOSimilarityAll.as_view()),
url(r'^api/v2/elos/fork/(?P<pk>[0-9]+)/$', ELOFork.as_view()),
# API v2 - Information
url(r'^api/v2/infor/elos-total/$', InforELOTotalCount.as_view()),
url(r'^api/v2/infor/users-total/$', InforUsersTotalCount.as_view()),
# API v2 - Groups
url(r'^api/v2/groups/abort/(?P<pk>[0-9]+)/$', GroupsMemberAbort.as_view()),
url(r'^api/v2/groups/join/(?P<pk>[0-9]+)/$', GroupsMemberJoin.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # (Same caveat as above: string view references require an older Django.)
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
|
SKIRT/PTS | modeling/build/models/stars.py | Python | agpl-3.0 | 41,196 | 0.004345 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.build.models.stars Contains the StarsBuilder class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
import numpy as np
# Import the relevant PTS classes and modules
from ....core.basics.log import log
from ....core.basics.configuration import ConfigurationDefinition
from ....core.basics.configuration import InteractiveConfigurationSetter, prompt_proceed, prompt_string, prompt_yn, prompt_filepath
from ....core.units.parsing import parse_unit as u
from ...core.mappings import Mappings
from .general import GeneralBuilder
from ..suite import model_map_filename
from ....core.tools import filesystem as fs
from ....magic.core.frame import Frame
from ...component.galaxy import GalaxyModelingComponent
from ....core.tools import types
from ....magic.tools import extinction
from ....core.tools.utils import lazyproperty
# -----------------------------------------------------------------
# Names of the basic stellar maps coming from the maps selection
basic_old_map_name = "old_disk"
basic_young_map_name = "young"
basic_ionizing_map_name = "ionizing"
basic_stellar_map_names = [
    basic_old_map_name,
    basic_young_map_name,
    basic_ionizing_map_name,
]

# -----------------------------------------------------------------

# Names of the basic stellar components of the model
bulge_component_name = "bulge"
old_component_name = "old"
young_component_name = "young"
ionizing_component_name = "ionizing"
basic_stellar_component_names = [
    bulge_component_name,
    old_component_name,
    young_component_name,
    ionizing_component_name,
]

# -----------------------------------------------------------------

# Human-readable titles for the different fixed components
titles = {
    bulge_component_name: "Evolved stellar bulge",
    old_component_name: "Evolved stellar disk",
    young_component_name: "Young stars",
    ionizing_component_name: "Ionizing stars",
}

# -----------------------------------------------------------------

# Which stellar component each selected map feeds into
component_name_for_map_name = {
    "old_disk": old_component_name,
    "young": young_component_name,
    "ionizing": ionizing_component_name,
}

# -----------------------------------------------------------------
class StarsBuilder(GeneralBuilder, GalaxyModelingComponent):
"""
This class...
"""
def __init__(self, *args, **kwargs):
    """
    The constructor. Both base classes are initialized explicitly (not via
    cooperative super()) because GeneralBuilder must be created without a
    configuration of its own.
    :param kwargs:
    :return:
    """

    # Call the constructor of the base class
    #super(StarsBuilder, self).__init__(*args, **kwargs)
    GeneralBuilder.__init__(self, no_config=True)
    GalaxyModelingComponent.__init__(self, *args, **kwargs)

    # Name of the maps from the maps selection (filled in while building)
    self.old_map_name = None
    self.young_map_name = None
    self.ionizing_map_name = None

    # The Mappings template for the ionizing stellar component
    self.ionizing_mappings = None
# -----------------------------------------------------------------
def _run(self, **kwargs):
    """
    Build every stellar component enabled in the configuration, in a fixed
    order, and finally write the results. (Step 1 is the setup performed by
    the framework before _run is invoked, hence the numbering starts at 2.)
    :param kwargs:
    :return:
    """

    # 2. Build bulge
    if self.config.bulge: self.build_bulge()

    # 3. Set the evolved stellar disk component
    if self.config.old: self.build_old()

    # 4. Set the ionizing stellar component
    if self.config.ionizing: self.build_ionizing()

    # 5. Set the young stellar component
    if self.config.young: self.build_young()

    # 6. Build additional stellar components
    if self.config.additional: self.build_additional()

    # 7. Write
    self.write()
# -----------------------------------------------------------------
@property
def model_name(self):
    """
    The name of the model under construction, straight from the config.
    :return:
    """

    return self.config.name
# -----------------------------------------------------------------
def setup(self, **kwargs):
    """
    Set up both base classes explicitly (mirrors the constructor, which also
    avoids cooperative super()).
    :param kwargs:
    :return:
    """

    # Call the setup function of the base class
    #super(StarsBuilder, self).setup()
    GeneralBuilder.setup(self, **kwargs)
    GalaxyModelingComponent.setup(self, **kwargs)
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_fuv_flux(self):
    """
    The total intrinsic (unattenuated) FUV flux in Jy: the observed FUV
    flux corrected for the configured FUV attenuation.
    :return:
    """

    # Determine unattenuated flux
    factor = extinction.observed_to_intrinsic_factor(self.config.fuv_attenuation)
    return self.observed_flux(self.fuv_filter, unit="Jy") * factor
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_fuv_luminosity(self):
    """
    The intrinsic FUV flux converted into a spectral luminosity (W/micron)
    at the FUV wavelength, using the galaxy distance.
    :return:
    """

    return self.intrinsic_fuv_flux.to("W/micron", wavelength=self.fuv_wavelength, distance=self.galaxy_distance)
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_ionizing_fuv_luminosity(self):
    """
    The intrinsic spectral luminosity of the ionizing stellar component at
    the FUV wavelength, evaluated from the Mappings SED template.
    :return:
    """

    # Get the spectral luminosity at the FUV wavelength
    return self.ionizing_mappings.luminosity_at(self.fuv_wavelength)
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_ionizing_fuv_flux(self):
    """
    The intrinsic ionizing FUV luminosity converted to a flux in Jy at the
    galaxy distance.
    :return:
    """

    return self.intrinsic_ionizing_fuv_luminosity.to("Jy", wavelength=self.fuv_wavelength, distance=self.galaxy_distance)
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_ionizing_fuv_neutral_luminosity(self):
    """
    The intrinsic ionizing FUV luminosity expressed as a neutral spectral
    density in solar luminosities (Lsun) at the FUV wavelength.
    :return:
    """

    return self.intrinsic_ionizing_fuv_luminosity.to("Lsun", density=True, density_strict=True, wavelength=self.fuv_wavelength)
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_young_fuv_luminosity(self):
    """
    The intrinsic FUV luminosity attributed to the young (non-ionizing)
    stellar component: the total intrinsic FUV luminosity minus the part
    contributed by the ionizing stars. Raises ValueError when the ionizing
    contribution alone already exceeds the total.
    :return:
    """

    # Checks: the ionizing contribution must be a sensible fraction of the total
    if self.intrinsic_ionizing_fuv_luminosity >= self.intrinsic_fuv_luminosity: raise ValueError("Cannot determine the initial normalization of young and ionizing component: intrinsic FUV luminosity of ionizing stars based on SFR is larger than the total unattenuated FUV luminosity")
    if self.intrinsic_ionizing_fuv_luminosity / self.intrinsic_fuv_luminosity > 0.5: log.warning("The contribution of ionizing stars to the intrinsic FUV luminosity is more than 50%")
    if self.intrinsic_ionizing_fuv_luminosity / self.intrinsic_fuv_luminosity < 0.1: log.warning("The contribution of ionizing stars to the intrinsic FUV luminosity is less than 10%")

    # Return the difference fo the total unattenuated FUV luminosity and the intrinsic FUV luminosity of the ionizing stars
    return self.intrinsic_fuv_luminosity - self.intrinsic_ionizing_fuv_luminosity
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_young_fuv_flux(self):
    """
    The intrinsic young-stars FUV luminosity converted to a flux in Jy at
    the galaxy distance.
    :return:
    """

    return self.intrinsic_young_fuv_luminosity.to("Jy", wavelength=self.fuv_wavelength, distance=self.galaxy_distance)
# -----------------------------------------------------------------
@lazyproperty
def intrinsic_young_fuv_neutral_luminosity(self):
    """
    The intrinsic young-stars FUV luminosity expressed as a neutral
    spectral density in solar luminosities (Lsun) at the FUV wavelength.
    :return:
    """

    return self.intrinsic_young_fuv_luminosity.to("Lsun", density=True, density_strict=True, wavelength=self.fuv_wavelength)
# ---------- |
import os.path
import glob

import numpy as np

from lamost import load_spectra

# Gather all LAMOST spectra in the data directory.
allfiles = np.array(glob.glob("example_LAMOST/Data_All/*fits"))
# We want just the file names. NOTE: np.char.lstrip(arr, chars) strips any
# leading characters belonging to the given character *set*, not the literal
# prefix, so a file name starting with e.g. 'a', 'D' or 't' would have been
# mangled; take the basename instead.
allfiles = np.array([os.path.basename(f) for f in allfiles])

dir_dat = "example_LAMOST/Data_All"
ID, wl, flux, ivar = load_spectra(dir_dat, allfiles)

# Number of usable (nonzero inverse-variance) pixels per spectrum.
# Derive the counts from the data shape instead of hard-coding the
# 11057 spectra and 3626 pixels of the original data set.
nspectra, npixels = ivar.shape
npix = np.array([np.count_nonzero(ivar[jj, :]) for jj in range(nspectra)])
good_frac = npix / float(npixels)

# Median signal-to-noise per spectrum, ignoring masked (zero-ivar) pixels.
SNR_raw = flux * ivar**0.5
bad = SNR_raw == 0
SNR_raw = np.ma.array(SNR_raw, mask=bad)
SNR = np.ma.median(SNR_raw, axis=1)

# We want to have at least 94% of pixels, and SNR of at least 100.
good = np.logical_and(good_frac > 0.94, SNR > 100)
tr_files = ID[good]

# Write the selected training file names, one per line.
with open("tr_files.txt", "w") as outputf:
    for tr_file in tr_files:
        outputf.write(tr_file + '\n')
CaliOpen/CaliOpen | src/backend/main/py.main/caliopen_main/common/helpers/normalize.py | Python | gpl-3.0 | 1,991 | 0.003516 | # -*- coding: utf-8 -*-
"""Normalization functions for different values."""
from __future__ import absolute_import, unicode_literals
import re
import logging
from email.utils import parseaddr
log = logging.getLogger(__name__)
mastodon_url_regex = '^https:\/\/(.*)\/@(.*)'
mastodon_url_legacy_regex = '^https:\/\/(.*)\/users\/(.*)'
def clean_email_address(addr):
    """Clean an email address for user resolve.

    Returns a tuple ``(normalized, parsed)``: ``normalized`` is the
    lower-cased address with any ``+tag`` extension stripped from the local
    part, ``parsed`` is the address as extracted from the input. Both are
    empty strings when the input cannot be parsed as an email address.
    """
    try:
        display_name, address = parseaddr(addr.replace('\r', ''))
    except UnicodeError:
        addr = addr.decode('utf-8', errors='ignore')
        display_name, address = parseaddr(addr.replace('\r', ''))
    err_msg = 'Invalid email address {}'.format(addr)
    if not address or '@' not in address:
        # parseaddr gave nothing usable: try a loose "Name <user@host>" match
        log.info('Last chance email parsing for {}'.format(addr))
        matches = re.match('(.*)<(.*@.*)>', addr)
        if not (matches and matches.groups()):
            log.warn(err_msg)
            return ("", "")
        display_name, address = matches.groups()
    local_part, _, domain = address.lower().partition('@')
    if '@' in domain:
        # more than one '@' means this is not a valid address
        log.error(err_msg)
        return ("", "")
    if '+' in local_part:
        # drop the "+extension" suffix of the local part
        local_part = local_part.split('+', 1)[0]
    # unicode everywhere
    return (u'%s@%s' % (local_part, domain), address)
def clean_twitter_address(addr):
    """Normalize a twitter handle: lower-case it and drop surrounding '@'."""
    handle = addr.lower()
    return handle.strip('@')
def clean_mastodon_address(addr):
    """Split a mastodon address '@user@instance' into [user, instance]."""
    normalized = addr.strip('@').lower()
    return normalized.split('@')
def parse_mastodon_url(url):
    """Extract server and username from a mastodon account url in the
    format https://instance.tld/@username, falling back to the legacy
    https://instance.tld/users/username form.

    :return: tuple (server, username)
    :raises SyntaxError: when the url matches neither format
    """
    for pattern in (mastodon_url_regex, mastodon_url_legacy_regex):
        found = re.findall(pattern, url)
        if len(found) == 1 and len(found[0]) == 2:
            return found[0]
    raise SyntaxError
|
erikjwaxx/python-gitlab-1 | setup.py | Python | gpl-3.0 | 502 | 0.041833 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

setup(
    name="python-gitlab",
    version="0.1",
    packages=find_packages(),
    install_requires=["requests", "markdown"],
    # metadata for upload to PyPI
    author="Itxaka Serrano Garcia",
    author_email="itxakaserrano@gmail.com",
    description="See the README.md file for more information",
    license="GPL3",
    keywords="gitlab git wrapper",
    url="http://github.com/itxaka/python-gitlab/",
)
andyfangdz/My-SAT-Life | satlife/settings.py | Python | unlicense | 2,253 | 0.000444 | """
Django settings for satlife project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djang | oproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Project-level template and static-asset source directories
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# NOTE(review): os.path.join('static') with a single argument is just
# 'static' — presumably this was meant to be relative to BASE_DIR; confirm.
STATICFILES_DIRS = ( os.path.join('static'), )

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '-^%-%r3noa6z$b8jv(d3qr2r!06y+po-k^aofop=mjgr2m3i8k'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

# Application definition

INSTALLED_APPS = (
    # django-admin-bootstrapped must precede django.contrib.admin
    'django_admin_bootstrapped.bootstrap3',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project app
    'grammar',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'satlife.urls'

WSGI_APPLICATION = 'satlife.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
drunz/parabench | src/ppc/Template.py | Python | gpl-3.0 | 2,742 | 0.007659 | # Parabench - A parallel file system benchmark
# Copyright (C) 2009-2010 Dennis Runz
# University of Heidelberg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import glob
from cStringIO import StringIO
class Template:
    """Generates blocks of code using template files.
    Brick maps contain placeholder -> value mappings that
    are used to generate code by replacing placeholders
    in templates with their corresponding values specified in
    the brick maps.

    Note: Python 2 only (relies on cStringIO and dict.iteritems).
    """

    include_hook = '#include:' # format of the brick include hook used in template files

    def __init__(self, template_name):
        """Locate the body template and the per-brick templates for template_name."""
        self._template_name = template_name
        self._body_file = 'ppc/tpl/' + template_name + '.tpl'
        self._brick_files = glob.glob('ppc/tpl/' + template_name + '/*.tpl')

    def _process_brick(self, brick_map, brick_name):
        """Render the brick template once per instance in brick_map[brick_name],
        substituting every placeholder -> value pair of each instance."""
        source_file = open('ppc/tpl/' + self._template_name + '/' +brick_name +'.tpl', 'r')
        code = StringIO()
        for brick_instance in brick_map[brick_name]:
            # Rewind so every instance is rendered from the full template
            source_file.seek(0)
            for line in source_file:
                # Replace placeholder variables
                for key, value in brick_instance.iteritems():
                    line = line.replace(key, value)
                code.write(line)
        source_file.close()
        return code.getvalue()

    def process(self, brick_map):
        """Process this template to code using the brick map provided."""
        code = StringIO()
        body_file = open(self._body_file, 'r')
        for line in body_file:
            # If include found, include brick file and process it to code
            if self.include_hook in line:
                code.write(self._process_brick(brick_map, line.split()[1]))
            # Else replace the options and add to code
            else:
                for key, value in brick_map['body'].iteritems():
                    line = line.replace(key, value)
                code.write(line)
        body_file.close()
        return code.getvalue()

    def name(self):
        """Return the name this template was constructed with."""
        return self._template_name
|
fredojones/chess | main.py | Python | mit | 145 | 0.013793 | #!/usr/ | local/bin/python3
from chess.app import App
def main():
    """Build the curses-based chess application and start it."""
    App(curses=True).run()
if __name__ == '__main__':
ma | in()
|
whateverpal/coinmetrics-tools | coincrawler/storage/postgres.py | Python | mit | 3,897 | 0.024378 | from coincrawler.storage import IStorage, IBlockStorageAccess, IPriceStorageAccess
mineableCurrencyColumns = ["height", "timestamp", "txVolume", "txCount", "generatedCoins", "fees", "difficulty"]
nonmineableCurrencyColumns = ["height", "timestamp", "txVolume", "txCount", "fees"]
class PostgresStorage(IStorage):
    """Thin IStorage implementation over a single psycopg2 connection/cursor."""

    def __init__(self, dbHost, dbName, dbUser, dbPassword):
        # psycopg2 is imported lazily so the module can be loaded without it
        import psycopg2
        self.connection = psycopg2.connect("host=%s dbname=%s user=%s password=%s" % (dbHost, dbName, dbUser, dbPassword))
        self.cursor = self.connection.cursor()

    def __del__(self):
        # NOTE(review): will raise if close() was already called explicitly —
        # presumably instances are torn down only once; confirm.
        self.close()

    def queryNoReturnCommit(self, text, params=None):
        # Execute and commit immediately (standalone write)
        self.cursor.execute(text, params)
        self.connection.commit()

    def queryNoReturnNoCommit(self, text, params=None):
        # Execute without committing; the caller batches via commit()
        self.cursor.execute(text, params)

    def queryReturnOne(self, text, params=None):
        self.cursor.execute(text, params)
        return self.cursor.fetchone()

    def queryReturnAll(self, text, params=None):
        self.cursor.execute(text, params)
        return self.cursor.fetchall()

    def commit(self):
        self.connection.commit()

    def close(self):
        self.cursor.close()
        self.connection.close()

    def getBlockStorageAccess(self, currency):
        # xem (NEM) is not mineable, so its table omits the mining columns
        columns = mineableCurrencyColumns if currency != "xem" else nonmineableCurrencyColumns
        return PostgresStorageBlockAccess(currency, columns, self)

    def getPriceStorageAccess(self, currency):
        return PostgresPriceStorageAccess(currency, self)
class PostgresStorageBlockAccess(IBlockStorageAccess):
    """Per-currency access to the blocks_<ticker> table."""

    # Column -> SQL type used when creating the blocks table
    BLOCK_TABLE_COLUMNS = {
        "height": "INTEGER PRIMARY KEY",
        "timestamp": "TIMESTAMP",
        "txVolume": "NUMERIC",
        "txCount": "INTEGER",
        "generatedCoins": "NUMERIC",
        "fees": "NUMERIC",
        "difficulty": "NUMERIC",
    }

    def __init__(self, ticker, columns, db):
        """Create (if missing) the blocks table for `ticker` with `columns`.

        NOTE: the table and column names are interpolated directly into the
        SQL, so they must come only from trusted internal configuration.
        """
        self.db = db
        self.ticker = ticker
        self.columns = columns
        self.tableName = "blocks_" + self.ticker
        columnsText = ", ".join([column + " " + PostgresStorageBlockAccess.BLOCK_TABLE_COLUMNS[column] for column in self.columns])
        self.db.queryNoReturnCommit("CREATE TABLE IF NOT EXISTS %s (%s)" % (self.tableName, columnsText))

    def getBlockHeight(self):
        """Return the highest stored block height, or 0 when the table is empty."""
        result = self.db.queryReturnAll("SELECT height FROM %s ORDER BY height DESC LIMIT 1" % self.tableName)
        if len(result) > 0:
            return int(result[0][0])
        else:
            return 0

    def getBlockTimestamp(self, height):
        """Return the timestamp of the block at `height`, or 0 when absent.

        BUGFIX: the `height` argument used to be ignored — the query always
        returned the latest block's timestamp regardless of the requested height.
        """
        result = self.db.queryReturnAll("SELECT timestamp FROM " + self.tableName + " WHERE height = %s LIMIT 1", (height,))
        if len(result) > 0:
            return result[0][0]
        else:
            return 0

    def storeBlock(self, block):
        """Insert one block dict; its keys must cover self.columns."""
        columnsText = ", ".join([column for column in self.columns])
        valuesText = ", ".join(["%s" for i in xrange(len(self.columns))])
        blockData = tuple()
        for column in self.columns:
            blockData += (block[column],)
        self.db.queryNoReturnCommit("INSERT INTO blocks_" + self.ticker + " (" + columnsText + ") VALUES (" + valuesText + ")", blockData)

    def getBlocksRange(self, offset, count):
        """Return `count` block rows starting at `offset`, ordered by height.

        NOTE(review): this always selects generatedCoins, a column absent
        from nonmineable-currency tables (xem) — confirm callers never
        request ranges for such currencies.
        """
        return self.db.queryReturnAll("SELECT timestamp, txVolume, txCount, generatedCoins, fees FROM " + self.tableName + " ORDER BY HEIGHT ASC LIMIT %s OFFSET %s",
            (count, offset))
class PostgresPriceStorageAccess(IPriceStorageAccess):
    """Per-currency access to the priceUsd_<ticker> table."""

    def __init__(self, ticker, db):
        # Create the price table on first use (table name is internal config)
        self.db = db
        self.ticker = ticker
        self.tableName = "priceUsd_" + ticker
        self.db.queryNoReturnCommit("CREATE TABLE IF NOT EXISTS %s (timestamp TIMESTAMP PRIMARY KEY, price NUMERIC, marketcap NUMERIC, totalExchangeVolume NUMERIC)" % self.tableName)

    def storePrices(self, tuples):
        # Batch all inserts into a single transaction, committed at the end
        for row in tuples:
            self.db.queryNoReturnNoCommit("INSERT INTO " + self.tableName + " (timestamp, price, marketcap, totalExchangeVolume) VALUES (%s, %s, %s, %s)", row)
        self.db.commit()

    def getPrices(self):
        return self.db.queryReturnAll("SELECT timestamp, price, marketCap, totalExchangeVolume FROM " + self.tableName)

    def flushPrices(self):
        # Remove every stored price row for this currency
        self.db.queryNoReturnCommit("TRUNCATE TABLE " + self.tableName)
|
bendykst/deluge | deluge/plugins/WebUi/setup.py | Python | gpl-3.0 | 1,489 | 0.002686 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.c | om>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from setuptools import find_packages, setup

# Plugin metadata, consumed both by setup() below and by Deluge's plugin loader
__plugin_name__ = "WebUi"
__author__ = "Damien Churchill"
__author_email__ = "damoxc@gmail.com"
__version__ = "0.1"
__url__ = "http://deluge-torrent.org"
__license__ = "GPLv3"
__description__ = "Allows starting the web interface within the daemon."
__long_description__ = """"""
# Ship the plugin's template and data files inside the egg
__pkg_data__ = {"deluge.plugins." + __plugin_name__.lower(): ["template/*", "data/*"]}

setup(
    name=__plugin_name__,
    version=__version__,
    description=__description__,
    author=__author__,
    author_email=__author_email__,
    url=__url__,
    license=__license__,
    long_description=__long_description__ if __long_description__ else __description__,

    packages=find_packages(),
    namespace_packages=["deluge", "deluge.plugins"],
    package_data=__pkg_data__,

    # Register the plugin entry points Deluge looks up for core and GTK UI
    entry_points="""
    [deluge.plugin.core]
    %s = deluge.plugins.%s:CorePlugin
    [deluge.plugin.gtkui]
    %s = deluge.plugins.%s:GtkUIPlugin
    """ % ((__plugin_name__, __plugin_name__.lower()) * 2)
)
|
songtao-yang/ProjectEuler-Python | 9.Special Pythagorean triplet/problem_9.py | Python | mit | 626 | 0.003221 | # -*- coding: ut | f-8 -*-
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
#
# a² + b² = c²
# For example, 3² + 4² = 9 + 16 = 25 = 52.
#
# There exists exactly one Pythagorean tripl | et for which a + b + c = 1000.
# Find the product abc.
def resolve(n):
    """Find the Pythagorean triplet a < b < c with a + b + c == n.

    Returns the triplet (a, b, c), or None when no such triplet exists.
    """
    # Use floor division so the code behaves identically on Python 2 and 3:
    # with the old "/", range() would receive a float under Python 3 and fail.
    for a in range(1, n // 2 - 2):
        for b in range(a, n // 2 - 1):
            c = n - a - b
            # Enforce the ordering a <= b < c
            if not c > b:
                continue
            if a ** 2 + b ** 2 == c ** 2:
                return a, b, c
    return None
if __name__ == '__main__':
    # Python 2 entry point: prints the triplet and the product a*b*c.
    # (The print statement and builtin reduce are Python 2 only.)
    ret = resolve(1000)
    print ret, reduce(lambda x, y: x * y, ret)
|
hradec/gaffer | python/GafferImageTest/FormatDataTest.py | Python | bsd-3-clause | 3,696 | 0.040855 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR | SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
######################################################## | ##################
import unittest
import imath
import IECore
import Gaffer
import GafferImage
import GafferImageTest
class FormatDataTest( GafferImageTest.ImageTestCase ) :

	def test( self ) :
		"""Equality, hashing and copying of FormatData wrappers."""

		f1 = GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 100 ) ), 0.5 )
		f2 = GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 100 ) ), 1 )

		fd1a = GafferImage.FormatData( f1 )
		fd1b = GafferImage.FormatData( f1 )
		fd2 = GafferImage.FormatData( f2 )

		self.assertEqual( fd1a.value, f1 )
		self.assertEqual( fd1b.value, f1 )
		self.assertEqual( fd2.value, f2 )

		# Same wrapped format -> equal data and equal hashes; the differing
		# pixel aspect (0.5 vs 1) must make fd2 distinct.
		self.assertEqual( fd1a, fd1b )
		self.assertNotEqual( fd1a, fd2 )

		self.assertEqual( fd1a.hash(), fd1b.hash() )
		self.assertNotEqual( fd1a.hash(), fd2.hash() )

		fd2c = fd2.copy()
		self.assertEqual( fd2c, fd2 )
		self.assertEqual( fd2c.hash(), fd2.hash() )

	def testSerialisation( self ) :
		"""FormatData survives a save/load round trip through IndexedIO."""

		f = GafferImage.Format( imath.Box2i( imath.V2i( 10, 20 ), imath.V2i( 200, 100 ) ), 0.5 )
		fd = GafferImage.FormatData( f )

		m = IECore.MemoryIndexedIO( IECore.CharVectorData(), [], IECore.IndexedIO.OpenMode.Write )
		fd.save( m, "f" )

		m2 = IECore.MemoryIndexedIO( m.buffer(), [], IECore.IndexedIO.OpenMode.Read )
		fd2 = IECore.Object.load( m2, "f" )

		self.assertEqual( fd2, fd )
		self.assertEqual( fd2.value, f )

	def testAutoConstructFromFormat( self ) :
		"""Assigning a Format into CompoundData auto-wraps it as FormatData."""

		f = GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 100 ) ), 0.5 )
		d = IECore.CompoundData()
		d["f"] = f
		self.assertEqual( d["f"], GafferImage.FormatData( f ) )

	def testStoreInContext( self ) :
		"""FormatData can be stored in and retrieved from a Gaffer Context."""

		f = GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 200, 100 ) ), 0.5 )
		d = GafferImage.FormatData( f )
		c = Gaffer.Context()
		c["f"] = d
		self.assertEqual( c["f"], d )

	def testEditableScopeForFormat( self ) :
		# Delegates to the C++-side test binding
		GafferImageTest.testEditableScopeForFormat()
if __name__ == "__main__":
unittest.main()
|
teemulehtinen/a-plus | userprofile/tests.py | Python | gpl-3.0 | 7,964 | 0.004646 | from datetime import timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from django.conf import settings
from course.models import Course, CourseInstance
from exercise.models import LearningObjectCategory
from userprofile.models import UserProfile
class UserProfileTest(TestCase):
def setUp(self):
    """Create four users (student, grader, teacher, superuser), one course
    with two instances, and three exercise categories used by the tests."""

    # A regular student with a student id and local organization
    self.student = User(username="testUser", first_name="Superb", last_name="Student", email="test@aplus.com")
    self.student.set_password("testPassword")
    self.student.save()
    self.student_profile = self.student.userprofile
    self.student_profile.student_id = "12345X"
    self.student_profile.organization = settings.LOCAL_ORGANIZATION
    self.student_profile.save()

    # A grader, later attached as assistant of course instance 1
    self.grader = User(username="grader", first_name="Grumpy", last_name="Grader", email="grader@aplus.com")
    self.grader.set_password("graderPassword")
    self.grader.save()
    self.grader_profile = self.grader.userprofile
    self.grader_profile.student_id = "67890Y"
    self.grader_profile.organization = settings.LOCAL_ORGANIZATION
    self.grader_profile.save()

    # A staff teacher (no student id), teacher on both course instances
    self.teacher = User(username="teacher", first_name="Tedious", last_name="Teacher", email="teacher@aplus.com", is_staff=True)
    self.teacher.set_password("teacherPassword")
    self.teacher.save()
    self.teacher_profile = self.teacher.userprofile

    self.superuser = User(username="superuser", first_name="Super", last_name="User", email="superuser@aplus.com", is_superuser=True)
    self.superuser.set_password("superuserPassword")
    self.superuser.save()
    self.superuser_profile = self.superuser.userprofile

    self.course = Course.objects.create(
        name="test course",
        code="123456",
        url="Course-Url"
    )

    # Two consecutive one-day course instances starting today
    self.today = timezone.now()
    self.tomorrow = self.today + timedelta(days=1)
    self.two_days_from_now = self.tomorrow + timedelta(days=1)

    self.course_instance1 = CourseInstance.objects.create(
        instance_name="Fall 2011 day 1",
        starting_time=self.today,
        ending_time=self.tomorrow,
        course=self.course,
        url="T-00.1000_d1"
    )
    self.course_instance1.add_teacher(self.teacher.userprofile)
    self.course_instance1.add_assistant(self.grader.userprofile)

    self.course_instance2 = CourseInstance.objects.create(
        instance_name="Fall 2011 day 2",
        starting_time=self.tomorrow,
        ending_time=self.two_days_from_now,
        course=self.course,
        url="T-00.1000_d2"
    )
    self.course_instance2.add_teacher(self.teacher.userprofile)

    # Categories 1 and 2 belong to instance 1, category 3 to instance 2
    self.learning_object_category1 = LearningObjectCategory.objects.create(
        name="test category 1",
        course_instance=self.course_instance1
    )
    #self.learning_object_category1.hidden_to.add(self.student_profile)
    #self.learning_object_category1.hidden_to.add(self.grader_profile)

    self.learning_object_category2 = LearningObjectCategory.objects.create(
        name="test category 2",
        course_instance=self.course_instance1
    )
    #self.learning_object_category2.hidden_to.add(self.student_profile)

    self.learning_object_category3 = LearningObjectCategory.objects.create(
        name="test category 3",
        course_instance=self.course_instance2
    )
def test_userprofile_get_by_student_id(self):
self.assertEqual(self.student_profile, UserProfile.get_by_student_id("12345X"))
self.assertEqual(self.grader_profile, UserProfile.get_by_student_id("67890Y"))
self.assertRaises(UserProfile.DoesNotExist, UserProfile.get_by_student_id, "111111")
def test_userprofile_unicode_string(self):
self.assertEqual("testUser (Superb Student, test@aplus.com, 12345X)", str(self.student_profile))
self.assertEqual("grader (Grumpy Grader, grader@aplus.com, 67890Y)", str(self.grader_profile))
self.assertEqual("teacher (Tedious Teacher, teacher@aplus.com)", str(self.teacher_profile))
self.assertEqual("superuser (Super User, superuser@aplus.com)", str(self.superuser_profile))
def test_userprofile_gravatar_url(self):
self.assertEqual("http://www.gravatar.com/avatar/36eb57f675f34b81bd859c525cb2b676?d=identicon", self.student_profile.avatar_url)
self.assertEqual("http://www.gravatar.com/avatar/e2321e37326539393fbae72b7558df8e?d=identicon", self.grader_profile.avatar_url)
self.assertEqual("http://www.gravatar.com/avatar/1bfe4ecc42454c9c1dc02bf93073a414?d=identicon", self.teacher_profile.avatar_url)
self.assertEqual("http://www.gravatar.com/avatar/f35e575136edbfb920643d10560e8814?d=identicon", self.superuser_profile.avatar_url)
def test_userprofile_shortname(self):
self.assertEqual("Superb S.", self.student_profile.shortname)
self.assertEqual("Grumpy G.", self.grader_profile.shortname)
self.assertEqual("Tedious T.", self.teacher_profile.shortname)
self.assertEqual("Super U.", self.superuser_profile.shortname)
# def test_userprofile_reset_hidden_categories_cache(self):
# self.student_profile.reset_hidden_categories_cache()
# self.assertEqual(2, len(self.student_profile.cached_hidden_categories))
# self.assertEqual(self.learning_object_category1, self.student_profile.cached_hidden_categories[0])
# self.assertEqual(self.learning_object_category2, self.student_profile.cached_hidden_categories[1])
#
# self.grader_profile.reset_hidden_categories_cache()
# self.assertEqual(1, len(self.grader_profile.cached_hidden_categories))
# self.assertEqual(self.learning_object_category1, self.grader_profile.cached_hidden_categories[0])
#
# self.teacher_profile.reset_hidden_categories_cache()
# self.assertEqual(0, len(self.teacher_profile.cached_hidden_categories))
#
# self.superuser_profile.reset_hidden_categories_cache()
# self.assertEqual(0, len(self.superuser_profile.cached_hidden_categories))
#
#
# def test_userprofile_hidden_categories_cache(self):
# student_hidden_categories_cache = self.student_profile.get_hidden_categories_cache()
# self.assertEqual(2, len(student_hidden_categories_cache))
# self.assertEqual(self.learning_object_category1, student_hidden_categories_cache[0])
# self.assertEqual(self.learning_object_category2, student_hidden_categories_cache[1])
#
# grader_hidden_categories_cache = self.grader_profile.get_hidden_categories_cache()
# self.assertEqual(1, len(grader_hidden_categories_cache))
# self.assertEqual(self.learning_object_category1, grader_hidden_categories_cache[0])
#
# self.assertEqual(0, len(self.teacher_profile.get_hidden_categories_cache()))
#
# self.assertEqual(0, len(self.superuser_profile.get_hidden_categories_cache()))
# def test_studentgroup_students_from_request(self):
# requestWithGroup = HttpRequest()
# requestWithGroup.user = self.student
# requestWithGroup.META["STUDENT_GROUP"] = self.student_group2
# studentsFromRequestWithGroup = StudentGroup.get_students_from_request(requestWithGroup)
# self.assertEqual(2, len(studentsFromRequestWithGroup))
# self.assertEqual(self.student_profile, studentsFromRequestWithGroup[0])
# self.assertEqual(self.grader_profile, studentsFromRequestWithGroup[1])
#
# requestWithoutGroup = HttpRequest()
# requestWithoutGroup.user = self.student
# studentsFromRequestWithoutGroup = StudentGroup.get_students_from_request(requestWithoutGroup)
# self.assertEqual(1, len(studentsFromRequestWithoutGroup))
# self.assertEqual(self.student_profile, studentsFromRequestWithoutGroup[0])
|
oomlout/oomlout-OOMP | old/OOMPpart_RESE_0805_X_O271_67.py | Python | cc0-1.0 | 243 | 0 | import OOMP
newPart = OO | MP.oompItem(9452)
newPart.addTag("oompType", "RESE")
newPart.addTag("oompSize", "0805")
newPart.addTag("oompColor", "X")
newPart. | addTag("oompDesc", "O271")
newPart.addTag("oompIndex", "67")
OOMP.parts.append(newPart)
|
rbiswas4/simlib | scripts/make_simlib_enigma.py | Python | mit | 2,405 | 0.00499 | from __future__ import absolute_import, division, print_function
import opsimsummary as oss
import opsimsummary.summarize_opsim as so
from sqlalchemy import create_engine
import pandas as pd
import time
import os

# Wall-clock start; recorded in the log file written at the end of the script.
script_start = time.time()
log_str = 'Running script with opsimsummary version {}\n'.format(oss.__VERSION__)
log_val = 'Starting Calculation at {}\n'.format(script_start)
log_str += log_val
# NOTE(review): pkgDir is never used below — confirm whether it can be removed.
pkgDir = os.path.split(oss.__file__)[0]
# Absolute path of the OpSim sqlite database read into `Summary` below.
dbname = os.path.join('/Users/rbiswas/data/', 'LSST/OpSimData',
                      'enigma_1189_sqlite.db')
log_val = 'The OpSim DataBase used is {}\n'.format(dbname)
log_str += log_val
engineFile = 'sqlite:///' + dbname
engine = create_engine(engineFile)
# read the database into a `pd.DataFrame`
Summary = pd.read_sql_table('Summary', engine)
log_val = 'dataframe read in from database {}\n'.format(time.time())
log_str += log_val
def _writeSimlibFor(propIDList, simlibFileName, description='DDF',
                    log_str=log_str):
    """Write a SNANA simlib file for the OpSim pointings in *propIDList*.

    Parameters
    ----------
    propIDList : list of int
        OpSim proposal ids to select from the `Summary` table.
    simlibFileName : str
        Output path for the generated simlib file.
    description : str
        Label used only in the log messages.
    log_str : str
        Log text accumulated so far (defaults to the module-level log).

    NOTE(review): strings are immutable, so every ``log_str += log_val``
    below rebinds the *local* name only — the module-level ``log_str``
    written to the log file at the end of the script never receives these
    per-call entries. Confirm whether that accumulation was intended.
    """
    df = Summary.query('propID == @propIDList')
    # Drop exact duplicate pointings before summarising.
    df.drop_duplicates(inplace=True)
    opSummary = so.SummaryOpsim(df, calculateSNANASimlibs=True,
                                user='rbiswas', host='time')
    log_val = 'The summary has {} entries\n'.format(len(df))
    print(log_val)
    log_str += log_val
    log_val = \
        'The summary has {} unique fields\n'.format(len(df.fieldID.unique()))
    print(log_val)
    log_str += log_val
    log_val = 'Writing simlib for {0} input to outfile {1}\n'.format(description,
                                                                     simlibFileName)
    print(log_val)
    log_str += log_val
    opSummary.writeSimlib(simlibFileName)
    log_val = 'Done simlib calculation at {0} and simlib written to {1}\n'.\
        format(time.time(), simlibFileName)
    print(log_val)
    log_str += log_val
# Output locations for the three simlib variants.
WFDSimlib = os.path.join('../opsimsummary/example_data/',
                         'Enigma_1189_WFD.simlib')
DDFSimlib = os.path.join('../opsimsummary/example_data/',
                         'Enigma_1189_DDF.simlib')
CombSimlib = os.path.join('../opsimsummary/example_data/',
                          'Enigma_1189_Combined.simlib')
# Per the descriptions below: propID 366 selects the DDF pointings,
# propID 364 the WFD pointings; the last call combines both.
_writeSimlibFor([366], DDFSimlib, description='DDF')
_writeSimlibFor([364], WFDSimlib, description='WFD')
_writeSimlibFor([364, 366], CombSimlib, description='Combined')
# Write the accumulated log. NOTE(review): only the module-level entries end
# up here (see the note on _writeSimlibFor).
logfile = 'enigma_simlibs.log'
with open(logfile, 'w') as f:
    f.write(log_str)
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/y/yield_inside_async_function_py36.py | Python | mit | 327 | 0.006116 | """Test that `yield` or `yield from` | can't be used inside an async function."""
# pylint: disable=missing-docstring, unused-variable
async def g | ood_coro():
def _inner():
yield 42
yield from [1, 2, 3]
async def bad_coro():
    # Only the ``yield from`` line carries the expected-message marker:
    # plain ``yield`` makes this an async generator, while ``yield from``
    # is what the checker reports inside an async function.
    yield 42
    yield from [1, 2, 3]  # [yield-inside-async-function]
|
rahulunair/nova | nova/tests/functional/test_metadata.py | Python | apache-2.0 | 7,360 | 0 | # Copyright 2016 Rackspace Australia
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import fixtures
import jsonschema
import os
import requests
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
class fake_result(object):
    """Minimal stand-in for a ``requests`` response carrying a JSON body."""

    def __init__(self, result):
        # Always report success; serialize the payload the way the real
        # metadata endpoints would.
        self.status_code = 200
        self.text = jsonutils.dumps(result)
real_request = requests.request
def fake_request(obj, url, method, **kwargs):
    """Serve canned responses for the test vendordata URLs.

    Port 123 and 124 return fixed documents, port 125 echoes back the
    request body; anything else is forwarded to the real implementation.
    """
    handlers = (
        ('http://127.0.0.1:123', lambda: fake_result({'a': 1, 'b': 'foo'})),
        ('http://127.0.0.1:124', lambda: fake_result({'c': 3})),
        ('http://127.0.0.1:125',
         lambda: fake_result(jsonutils.loads(kwargs.get('data', '{}')))),
    )
    for prefix, make_response in handlers:
        if url.startswith(prefix):
            return make_response()
    return real_request(method, url, **kwargs)
class MetadataTest(test.TestCase, integrated_helpers.InstanceHelperMixin):
    """Functional tests for the nova metadata API, including dynamic
    vendordata fetched from external (here: stubbed) REST services."""

    def setUp(self):
        super(MetadataTest, self).setUp()
        # Stub out glance and make sure the stub is reset afterwards.
        fake_image.stub_out_image_service(self)
        self.addCleanup(fake_image.FakeImageService_reset)
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        self.start_service('conductor')
        self.start_service('scheduler')
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).api
        self.start_service('compute')
        # create a server for the tests
        server = self._build_server(name='test')
        server = self.api.post_server({'server': server})
        self.server = self._wait_for_state_change(server, 'ACTIVE')
        # Spin up a real metadata server; md_url is its base URL.
        self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer())
        self.md_url = self.api_fixture.md_url

        # make sure that the metadata service returns information about the
        # server we created above
        def fake_get_fixed_ip_by_address(self, ctxt, address):
            return {'instance_uuid': server['id']}

        self.useFixture(
            fixtures.MonkeyPatch(
                'nova.network.neutron.API.get_fixed_ip_by_address',
                fake_get_fixed_ip_by_address))

    def test_lookup_metadata_root_url(self):
        # The metadata root must answer plain GETs.
        res = requests.request('GET', self.md_url, timeout=5)
        self.assertEqual(200, res.status_code)

    def test_lookup_metadata_openstack_url(self):
        url = '%sopenstack' % self.md_url
        res = requests.request('GET', url, timeout=5,
                               headers={'X-Forwarded-For': '127.0.0.2'})
        self.assertEqual(200, res.status_code)

    def test_lookup_metadata_data_url(self):
        # meta_data.json must be valid JSON describing our server.
        url = '%sopenstack/latest/meta_data.json' % self.md_url
        res = requests.request('GET', url, timeout=5)
        self.assertEqual(200, res.status_code)
        j = jsonutils.loads(res.text)
        self.assertIn('hostname', j)
        self.assertEqual('test.novalocal', j['hostname'])

    def test_lookup_external_service(self):
        # Two dynamic targets pointing at the same stub service: both keys
        # should appear in vendor_data2.json with the stub's payload.
        self.flags(
            vendordata_providers=['StaticJSON', 'DynamicJSON'],
            vendordata_dynamic_targets=[
                'testing@http://127.0.0.1:123',
                'hamster@http://127.0.0.1:123'
            ],
            group='api'
        )
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.Session.request', fake_request))
        url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
        res = requests.request('GET', url, timeout=5)
        self.assertEqual(200, res.status_code)
        j = jsonutils.loads(res.text)
        self.assertEqual({}, j['static'])
        self.assertEqual(1, j['testing']['a'])
        self.assertEqual('foo', j['testing']['b'])
        self.assertEqual(1, j['hamster']['a'])
        self.assertEqual('foo', j['hamster']['b'])

    def test_lookup_external_service_no_overwrite(self):
        # Two targets share the name 'testing'; the first must win and the
        # second service's data (port 124, key 'c') must not leak in.
        self.flags(
            vendordata_providers=['DynamicJSON'],
            vendordata_dynamic_targets=[
                'testing@http://127.0.0.1:123',
                'testing@http://127.0.0.1:124'
            ],
            group='api'
        )
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.Session.request', fake_request))
        url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
        res = requests.request('GET', url, timeout=5)
        self.assertEqual(200, res.status_code)
        j = jsonutils.loads(res.text)
        self.assertNotIn('static', j)
        self.assertEqual(1, j['testing']['a'])
        self.assertEqual('foo', j['testing']['b'])
        self.assertNotIn('c', j['testing'])

    def test_lookup_external_service_passes_data(self):
        # Much of the data we pass to the REST service is missing because of
        # the way we've created the fake instance, but we should at least try
        # and ensure we're passing _some_ data through to the external REST
        # service.
        self.flags(
            vendordata_providers=['DynamicJSON'],
            vendordata_dynamic_targets=[
                'testing@http://127.0.0.1:125'
            ],
            group='api'
        )
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.Session.request', fake_request))
        url = '%sopenstack/2016-10-06/vendor_data2.json' % self.md_url
        res = requests.request('GET', url, timeout=5)
        self.assertEqual(200, res.status_code)
        # Port 125 echoes the request body, so 'testing' now contains what
        # nova actually sent to the external service.
        j = jsonutils.loads(res.text)
        self.assertIn('instance-id', j['testing'])
        self.assertTrue(uuidutils.is_uuid_like(j['testing']['instance-id']))
        self.assertIn('hostname', j['testing'])
        self.assertEqual(self.server['tenant_id'], j['testing']['project-id'])
        self.assertIn('metadata', j['testing'])
        self.assertIn('image-id', j['testing'])
        self.assertIn('user-data', j['testing'])

    def test_network_data_matches_schema(self):
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.Session.request', fake_request))
        url = '%sopenstack/latest/network_data.json' % self.md_url
        res = requests.request('GET', url, timeout=5)
        self.assertEqual(200, res.status_code)
        # load the jsonschema for network_data
        schema_file = os.path.normpath(os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../../../doc/api_schemas/network_data.json"))
        with open(schema_file, 'rb') as f:
            schema = jsonutils.load(f)
        # Raises jsonschema.ValidationError (failing the test) on mismatch.
        jsonschema.validate(res.json(), schema)
|
hkariti/ansible | lib/ansible/utils/module_docs_fragments/vca.py | Python | gpl-3.0 | 2,634 | 0.001519 | # (c) 2016, Charles Paul <cpaul@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable documentation fragment shared by the VCA cloud modules."""

    # Parameters for VCA modules
    DOCUMENTATION = """
options:
    username:
        description:
            - The vca username or email address, if not set the environment variable C(VCA_USER) is checked for the username.
        required: false
        default: None
        aliases: ['user']
    password:
        description:
            - The vca password, if not set the environment variable C(VCA_PASS) is checked for the password.
        required: false
        default: None
        aliases: ['pass', 'passwd']
    org:
        description:
            - The org to login to for creating vapp. This option is required when the C(service_type) is I(vdc).
        required: false
        default: None
    instance_id:
        description:
            - The instance id in a vchs environment to be used for creating the vapp.
        required: false
        default: None
    host:
        description:
            - The authentication host to be used when service type is vcd.
        required: false
        default: None
    api_version:
        description:
            - The api version to be used with the vca.
        required: false
        default: "5.7"
    service_type:
        description:
            - The type of service we are authenticating against.
        required: false
        default: vca
        choices: [ "vca", "vchs", "vcd" ]
    state:
        description:
            - If the object should be added or removed.
        required: false
        default: present
        choices: [ "present", "absent" ]
    verify_certs:
        description:
            - If the certificates of the authentication is to be verified.
        type: bool
        default: True
    vdc_name:
        description:
            - The name of the vdc where the gateway is located.
        required: false
        default: None
    gateway_name:
        description:
            - The name of the gateway of the vdc where the rule should be added.
        required: false
        default: gateway
"""
|
praekelt/django-scaler | scaler/middleware.py | Python | bsd-3-clause | 6,116 | 0 | import time
import re
from django.http import HttpResponseRedirect
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.conf import settings
# In-memory caches are used since different processes do not necessarily
# exhibit the same response times, even though they may share a caching backend
# like memcached. We also don't have to be concerned with thread safety so no
# need to use LocMemCache.
# Per-process mutable state: `_cache` holds per-path counters/trends keyed by
# "<path>-scaler-<field>" strings; `_request_response_times` maps a path to
# its average response time in milliseconds.
_cache = {}
_request_response_times = {}

# Resolved once at import time: URL users are redirected to when the server
# is considered too busy to handle their request.
SERVER_BUSY_URL = reverse(
    settings.DJANGO_SCALER.get('server_busy_url_name', 'server-busy')
)
def redirect_n_slowest_dummy():
    """Default hook: never force-redirect any slowest requests."""
    return 0
def redirect_n_slowest_from_cache():
    """Read the externally-set "redirect N slowest" counter from the cache."""
    value = cache.get('django_scaler_n_slowest')
    return value
def redirect_percentage_slowest_dummy():
    """Default hook: never force-redirect a percentage of slow requests."""
    return 0
def redirect_percentage_slowest_from_cache():
    """Read the externally-set slow-percentage value from the cache."""
    value = cache.get('django_scaler_percentage_slowest')
    return value
def redirect_regexes_dummy():
    """Default hook: no path patterns are force-redirected."""
    return []
def redirect_regexes_from_cache():
    """Read the externally-set list of path regexes from the cache."""
    value = cache.get('django_scaler_regexes')
    return value
class ScalerMiddleware:
    """Add as the first middleware in your settings file.

    process_request may redirect a request to SERVER_BUSY_URL either because
    an external hook said so (N slowest / percentage slowest / regexes) or
    because the path's recent response-time trend is much slower than its
    long-term average. process_response records per-path timing statistics.
    """

    def process_request(self, request):
        # Ajax requests are not subject to scaling. Busy page is exempt from
        # scaling.
        if request.is_ajax() or request.META['PATH_INFO'] == SERVER_BUSY_URL:
            return

        # If a n_slowest or percentage_slowest is provided then forcefully
        # redirect the n slowest or percentage_slowest requests. This allows
        # external processes to easily instruct us to scale back.
        n_slowest = settings.DJANGO_SCALER.get(
            'redirect_n_slowest_function', redirect_n_slowest_dummy
        )()
        percentage_slowest = settings.DJANGO_SCALER.get(
            'redirect_percentage_slowest_function',
            redirect_percentage_slowest_dummy
        )()
        regexes = settings.DJANGO_SCALER.get(
            'redirect_regexes_function',
            redirect_regexes_dummy
        )()
        # NOTE(review): ajax requests already returned above, so this check
        # always passes — presumably kept for safety; confirm before removing.
        if not request.is_ajax():
            if n_slowest or percentage_slowest:
                # Sort paths by average response time, slowest first.
                paths = sorted(
                    _request_response_times,
                    key=_request_response_times.__getitem__,
                    reverse=True
                )
                if n_slowest:
                    li = paths[:n_slowest]
                    if request.META['PATH_INFO'] in li:
                        return HttpResponseRedirect(SERVER_BUSY_URL)
                if percentage_slowest:
                    # Convert the percentage into a count of paths.
                    n = int(round(percentage_slowest / 100.0 * len(paths)))
                    li = paths[:n]
                    if request.META['PATH_INFO'] in li:
                        return HttpResponseRedirect(SERVER_BUSY_URL)
            if regexes:
                for regex in regexes:
                    m = re.match(r'%s' % regex, request.META['PATH_INFO'])
                    if m is not None:
                        return HttpResponseRedirect(SERVER_BUSY_URL)

        # On to automatic redirection
        now = time.time()
        # Marker for process_response
        setattr(request, '_django_scaler_stamp', now)
        # Cache key uses path info
        prefix = request.META['PATH_INFO'] + '-scaler-'
        # Fetch values: cumulative time, hit count, recent-trend samples and
        # the time we started redirecting this path (if any).
        key_stamp = prefix + 'stamp'
        key_hits = prefix + 'hits'
        key_trend = prefix + 'trend'
        key_redir = prefix + 'redir'
        stamp = _cache.get(key_stamp, 0)
        hits = _cache.get(key_hits, 0)
        trend = _cache.get(key_trend, [])
        redir = _cache.get(key_redir, now)
        # Nothing to do if not enough hits yet
        if hits > settings.DJANGO_SCALER.get('trend_size', 100):
            avg = stamp * 1.0 / hits
            # Update request response times dictionary
            _request_response_times[request.META['PATH_INFO']] = avg
            # If trend is X slower than average then redirect, unless
            # enough time has passed to attempt processing.
            slow_threshold = settings.DJANGO_SCALER.get(
                'slow_threshold', 4.0
            )
            if sum(trend) * 1.0 / len(trend) > avg * slow_threshold:
                # Has enough time passed to allow the request?
                redirect_for = settings.DJANGO_SCALER.get(
                    'redirect_for', 60
                )
                if now - redir > redirect_for:
                    # Yes, enough time has passed
                    # Clear time of last redirect
                    try:
                        del _cache[key_redir]
                    except KeyError:
                        pass
                    # Clear trend since it currently stores slow response
                    # times. We want a fresh start.
                    _cache[key_trend] = []
                else:
                    # No, not enough time has passed. Keep redirecting.
                    # Remove marker so process_response does not store data
                    delattr(request, '_django_scaler_stamp')
                    # Set time of last redirect if it has not been set
                    _cache.setdefault(key_redir, now)
                    return HttpResponseRedirect(SERVER_BUSY_URL)

    def process_response(self, request, response):
        t = getattr(request, '_django_scaler_stamp', None)
        # Anything to do? (marker is absent for ajax/busy/redirected requests)
        if t is not None:
            # Diff in milliseconds
            diff = int((time.time() - t) * 1000)
            # Fetch values
            prefix = request.META['PATH_INFO'] + '-scaler-'
            key_stamp = prefix + 'stamp'
            key_hits = prefix + 'hits'
            key_trend = prefix + 'trend'
            stamp = _cache.get(key_stamp, 0)
            hits = _cache.get(key_hits, 0)
            trend = _cache.get(key_trend, [])
            # Set values: cumulative totals plus a bounded sliding window of
            # the most recent response times.
            _cache[key_stamp] = stamp + diff
            _cache[key_hits] = hits + 1
            trend_size = settings.DJANGO_SCALER.get('trend_size', 100)
            _cache[key_trend] = (trend + [diff])[-trend_size:]
        return response
|
miroli/frenchy | frenchy/utils.py | Python | mit | 651 | 0.001536 | import os
import pandas as pd
from .config import BASE_URL
dirname = os.path.dirname(os.path.abspath(__file__))
# Commune lookup table, loaded once at import time from the pickled
# dataframe shipped alongside this module.
df = pd.read_pickle(os.path.join(dirname, 'data.p'))
def get_geo(code, year):
    """Return the data record for the commune with INSEE code *code*.

    NOTE(review): *year* is currently unused — confirm whether year-specific
    lookups were intended. Raises IndexError when *code* is not present in
    the loaded dataframe.
    """
    row = df[df['insee_code'] == code]
    return row.to_dict('records')[0]
def url_resolver(code, year, region_code, department_code):
    """Build the election-results URL for a commune and election year.

    Only the 2012 presidential election (PR2012) is supported; any other
    *year* returns None (previously an implicit fall-through, now explicit).

    The lambda assignment was replaced with a proper inner function
    (PEP 8 / E731); output is byte-identical.
    """
    def zero_pad(num):
        # The results site expects each code prefixed with a literal '0'.
        return f'0{num}'

    if year == 2012:
        reg_code = zero_pad(region_code)
        dep_code = zero_pad(department_code)
        com_code = zero_pad(code)
        return (f'{BASE_URL}elecresult__PR2012/(path)/PR2012/'
                f'{reg_code}/{dep_code}/{com_code}.html')
    return None
|
alphagov/notifications-utils | notifications_utils/request_helper.py | Python | mit | 4,607 | 0.002822 | from flask import abort, current_app, request
from flask.wrappers import Request
class NotifyRequest(Request):
    """
    A custom Request class, implementing extraction of zipkin headers used to trace request through cloudfoundry
    as described here: https://docs.cloudfoundry.org/concepts/http-routing.html#zipkin-headers
    """

    @property
    def request_id(self):
        # Readability alias: identical to trace_id.
        return self.trace_id

    @property
    def trace_id(self):
        """
        The "trace id" (in zipkin terms) assigned to this request, if present (None otherwise)
        """
        # Cached on first access; header names come from app config.
        if not hasattr(self, "_trace_id"):
            self._trace_id = self._get_header_value(current_app.config['NOTIFY_TRACE_ID_HEADER'])
        return self._trace_id

    @property
    def span_id(self):
        """
        The "span id" (in zipkin terms) set in this request's header, if present (None otherwise)
        """
        if not hasattr(self, "_span_id"):
            # note how we don't generate an id of our own. not being supplied a span id implies that we are running in
            # an environment with no span-id-aware request router, and thus would have no intermediary to prevent the
            # propagation of our span id all the way through all our onwards requests much like trace id. and the point
            # of span id is to assign identifiers to each individual request.
            self._span_id = self._get_header_value(current_app.config['NOTIFY_SPAN_ID_HEADER'])
        return self._span_id

    @property
    def parent_span_id(self):
        """
        The "parent span id" (in zipkin terms) set in this request's header, if present (None otherwise)
        """
        if not hasattr(self, "_parent_span_id"):
            self._parent_span_id = self._get_header_value(current_app.config['NOTIFY_PARENT_SPAN_ID_HEADER'])
        return self._parent_span_id

    def _get_header_value(self, header_name):
        """
        Returns value of the given header, or None when absent or empty.
        """
        if header_name in self.headers and self.headers[header_name]:
            return self.headers[header_name]
        return None
class ResponseHeaderMiddleware(object):
    """WSGI middleware that echoes the request's zipkin trace/span ids on
    the response, unless the wrapped app already set those headers."""

    def __init__(self, app, trace_id_header, span_id_header):
        self.app = app
        self.trace_id_header = trace_id_header
        self.span_id_header = span_id_header

    def __call__(self, environ, start_response):
        def _start_response_with_ids(status, headers, exc_info=None):
            existing = {name.lower() for name, _value in headers}
            if self.trace_id_header not in existing:
                headers.append((self.trace_id_header, str(request.trace_id)))
            if self.span_id_header not in existing:
                headers.append((self.span_id_header, str(request.span_id)))
            return start_response(status, headers, exc_info)

        return self.app(environ, _start_response_with_ids)
def init_app(app):
    """Install zipkin request tracing on a Flask app: default header names,
    the NotifyRequest class, and the response-header middleware."""
    defaults = {
        "NOTIFY_TRACE_ID_HEADER": "X-B3-TraceId",
        "NOTIFY_SPAN_ID_HEADER": "X-B3-SpanId",
        "NOTIFY_PARENT_SPAN_ID_HEADER": "X-B3-ParentSpanId",
    }
    for key, value in defaults.items():
        app.config.setdefault(key, value)

    app.request_class = NotifyRequest
    app.wsgi_app = ResponseHeaderMiddleware(
        app.wsgi_app,
        app.config['NOTIFY_TRACE_ID_HEADER'],
        app.config['NOTIFY_SPAN_ID_HEADER'],
    )
def check_proxy_header_before_request():
    """Flask before_request hook: abort 403 when the route-proxy secret
    header fails validation (only enforced if CHECK_PROXY_HEADER is set)."""
    candidate_keys = [
        current_app.config.get('ROUTE_SECRET_KEY_1'),
        current_app.config.get('ROUTE_SECRET_KEY_2'),
    ]
    ok, msg = _check_proxy_header_secret(request, candidate_keys)

    if not ok and current_app.config.get('CHECK_PROXY_HEADER', False):
        current_app.logger.warning(msg)
        abort(403)

    # We need to return None to continue processing the request
    # http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request
    return None
def _check_proxy_header_secret(request, secrets, header='X-Custom-Forwarder'):
if header not in request.headers:
return False, "Header missing"
header_secret = request.headers.get(header)
if not header_secret:
return False, "Header exists but is empty"
# if there isn't any non-empty secret configured we fail closed
if not any(secrets):
return False, "Secrets are not configured"
for i, secret in enumerate(secrets):
if header_secret == secret:
return True, "Key used: {}".format(i + 1) # add 1 to make it human-compatible
return False, "Header didn't match any keys"
|
danielru/pySDC | pySDC/implementations/problem_classes/GrayScott_1D_FEniCS_implicit.py | Python | bsd-2-clause | 6,141 | 0.001791 | from __future__ import division
import dolfin as df
import numpy as np
import random
import logging
from pySDC.core.Problem import ptype
from pySDC.core.Errors import ParameterError
# noinspection PyUnusedLocal
class fenics_grayscott(ptype):
    """
    Example implementing the 1D Gray-Scott reaction-diffusion system on
    [0, 100] with FEniCS, solved fully implicitly.

    (The previous summary mentioned the forced heat equation — that was a
    copy-paste leftover; the weak forms below are the two-species
    Gray-Scott model with diffusivities Du/Dv and feed/kill rates A/B.)

    Attributes:
        V: function space (mixed, two components)
        w: function for the RHS
        w1: split of w, part 1
        w2: split of w, part 2
        F1: weak form of RHS, first part
        F2: weak form of RHS, second part
        F: weak form of RHS, full
        M: full mass matrix for both parts
    """

    def __init__(self, problem_params, dtype_u, dtype_f):
        """
        Initialization routine

        Args:
            problem_params: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        # NOTE(review): Boundary is defined but never used below — presumably
        # kept for symmetry with related examples; confirm before removing.
        def Boundary(x, on_boundary):
            return on_boundary

        # these parameters will be used later, so assert their existence
        essential_keys = ['c_nvars', 't0', 'family', 'order', 'refinements', 'Du', 'Dv', 'A', 'B']
        for key in essential_keys:
            if key not in problem_params:
                msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
                raise ParameterError(msg)

        # set logger level for FFC and dolfin
        df.set_log_level(df.WARNING)
        logging.getLogger('FFC').setLevel(logging.WARNING)

        # set solver and form parameters
        df.parameters["form_compiler"]["optimize"] = True
        df.parameters["form_compiler"]["cpp_optimize"] = True

        # set mesh and refinement (for multilevel)
        mesh = df.IntervalMesh(problem_params['c_nvars'], 0, 100)
        for i in range(problem_params['refinements']):
            mesh = df.refine(mesh)

        # define function space for future reference
        V = df.FunctionSpace(mesh, problem_params['family'], problem_params['order'])
        # mixed two-component space: one component per species
        self.V = V * V

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_grayscott, self).__init__(self.V, dtype_u, dtype_f, problem_params)

        # rhs in weak form
        self.w = df.Function(self.V)
        q1, q2 = df.TestFunctions(self.V)

        self.w1, self.w2 = df.split(self.w)

        self.F1 = (-self.params.Du * df.inner(df.nabla_grad(self.w1), df.nabla_grad(q1)) -
                   self.w1 * (self.w2 ** 2) * q1 + self.params.A * (1 - self.w1) * q1) * df.dx
        self.F2 = (-self.params.Dv * df.inner(df.nabla_grad(self.w2), df.nabla_grad(q2)) +
                   self.w1 * (self.w2 ** 2) * q2 - self.params.B * self.w2 * q2) * df.dx
        self.F = self.F1 + self.F2

        # mass matrix (block-diagonal: one mass block per component)
        u1, u2 = df.TrialFunctions(self.V)
        a_M = u1 * q1 * df.dx
        M1 = df.assemble(a_M)
        a_M = u2 * q2 * df.dx
        M2 = df.assemble(a_M)
        self.M = M1 + M2

    def __invert_mass_matrix(self, u):
        """
        Helper routine to invert mass matrix

        Args:
            u (dtype_u): current values

        Returns:
            dtype_u: inv(M)*u
        """

        me = self.dtype_u(self.V)

        A = 1.0 * self.M
        b = self.dtype_u(u)

        df.solve(A, me.values.vector(), b.values.vector())

        return me

    def solve_system(self, rhs, factor, u0, t):
        """
        Dolfin's linear solver for (M-factor*A)u = rhs

        Args:
            rhs (dtype_f): right-hand side for the nonlinear system
            factor (float): abbrev. for the node-to-node stepsize (or any other factor required)
            u0 (dtype_u): initial guess for the iterative solver (not used here so far)
            t (float): current time

        Returns:
            dtype_u: solution as mesh
        """

        sol = self.dtype_u(self.V)
        self.w.assign(sol.values)

        # fixme: is this really necessary to do each time?
        q1, q2 = df.TestFunctions(self.V)
        w1, w2 = df.split(self.w)
        r1, r2 = df.split(rhs.values)
        # implicit system: (w - factor*F(w)) - rhs = 0, component-wise
        F1 = w1 * q1 * df.dx - factor * self.F1 - r1 * q1 * df.dx
        F2 = w2 * q2 * df.dx - factor * self.F2 - r2 * q2 * df.dx
        F = F1 + F2
        du = df.TrialFunction(self.V)
        J = df.derivative(F, self.w, du)

        # solve the nonlinear system with Newton's method
        problem = df.NonlinearVariationalProblem(F, self.w, [], J)
        solver = df.NonlinearVariationalSolver(problem)

        prm = solver.parameters
        prm['newton_solver']['absolute_tolerance'] = 1E-09
        prm['newton_solver']['relative_tolerance'] = 1E-08
        prm['newton_solver']['maximum_iterations'] = 100
        prm['newton_solver']['relaxation_parameter'] = 1.0

        solver.solve()

        sol.values.assign(self.w)

        return sol

    def eval_f(self, u, t):
        """
        Routine to evaluate both parts of the RHS

        Args:
            u (dtype_u): current values
            t (float): current time

        Returns:
            dtype_f: the RHS divided into two parts
        """

        f = self.dtype_f(self.V)

        self.w.assign(u.values)
        f.values = df.Function(self.V, df.assemble(self.F))
        # the assembled form is M*f, so apply inv(M) to get f itself
        f = self.__invert_mass_matrix(f)

        return f

    def u_exact(self, t):
        """
        Routine to compute the exact solution at time t

        (Actually returns the standard Gray-Scott initial condition; it is
        only meaningful as the starting state, not for t > t0.)

        Args:
            t (float): current time

        Returns:
            dtype_u: exact solution
        """

        class InitialConditions(df.Expression):
            def __init__(self):
                # fixme: why do we need this?
                random.seed(2)
                pass

            def eval(self, values, x):
                # u starts near 1 with a localized dip, v near 0 with a
                # matching localized bump in the middle of the domain
                values[0] = 1 - 0.5 * np.power(np.sin(np.pi * x[0] / 100), 100)
                values[1] = 0.25 * np.power(np.sin(np.pi * x[0] / 100), 100)

            def value_shape(self):
                return 2,

        uinit = InitialConditions()

        me = self.dtype_u(self.V)
        me.values = df.interpolate(uinit, self.V)

        return me
|
ESSolutions/ESSArch_Core | ESSArch_Core/tags/tests/test_search.py | Python | gpl-3.0 | 22,665 | 0.001853 | import time
from datetime import datetime
from pydoc import locate
from unittest import SkipTest
from countries_plus.models import Country
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import override_settings, tag
from django.urls import reverse
from django.utils import timezone
from django.utils.timezone import make_aware
from elasticsearch.client import IngestClient
from elasticsearch.exceptions import ConnectionError
from elasticsearch_dsl.connections import (
connections,
get_connection as get_es_connection,
)
from languages_plus.models import Language
from rest_framework import status
from rest_framework.test import APITestCase, APITransactionTestCase
from ESSArch_Core.agents.models import (
Agent,
AgentTagLink,
AgentTagLinkRelationType,
AgentType,
MainAgentType,
RefCode,
)
from ESSArch_Core.auth.models import Group, GroupType
from ESSArch_Core.configuration.models import Feature
from ESSArch_Core.ip.models import InformationPackage
from ESSArch_Core.maintenance.models import AppraisalJob
from ESSArch_Core.search import alias_migration
from ESSArch_Core.tags.documents import (
Archive,
Component,
File,
StructureUnitDocument,
)
from ESSArch_Core.tags.models import (
Structure,
StructureType, |
StructureUnit,
StructureUnitType,
Tag,
TagStructure,
TagVersion,
TagVersionType,
)
User = get_user_model | ()
def get_test_client(nowait=False):
client = get_es_connection('default')
# wait for yellow status
for _ in range(1 if nowait else 5):
try:
client.cluster.health(wait_for_status="yellow")
return client
except ConnectionError:
time.sleep(0.1)
else:
# timeout
raise SkipTest("Elasticsearch failed to start")
class ESSArchSearchBaseTestCaseMixin:
@staticmethod
def _get_client():
return get_test_client()
@classmethod
def setUpClass(cls):
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
connections.configure(**settings.ELASTICSEARCH_CONNECTIONS)
cls.es_client = cls._get_client()
IngestClient(cls.es_client).put_pipeline(id='ingest_attachment', body={
'description': "Extract attachment information",
'processors': [
{
"attachment": {
"field": "data",
"indexed_chars": "-1"
},
"remove": {
"field": "data"
}
}
]
})
super().setUpClass()
def setUp(self):
for _index_name, index_class in settings.ELASTICSEARCH_INDEXES['default'].items():
doctype = locate(index_class)
alias_migration.setup_index(doctype)
def tearDown(self):
self.es_client.indices.delete(index="*", ignore=404)
self.es_client.indices.delete_template(name="*", ignore=404)
@override_settings(ELASTICSEARCH_CONNECTIONS=settings.ELASTICSEARCH_TEST_CONNECTIONS)
@tag('requires-elasticsearch')
class ESSArchSearchBaseTestCase(ESSArchSearchBaseTestCaseMixin, APITestCase):
pass
@override_settings(ELASTICSEARCH_CONNECTIONS=settings.ELASTICSEARCH_TEST_CONNECTIONS)
@tag('requires-elasticsearch')
class ESSArchSearchBaseTransactionTestCase(ESSArchSearchBaseTestCaseMixin, APITransactionTestCase):
pass
class ComponentSearchTestCase(ESSArchSearchBaseTestCase):
fixtures = ['countries_data', 'languages_data']
@classmethod
def setUpTestData(cls):
cls.url = reverse('search-list')
Feature.objects.create(name='archival descriptions', enabled=True)
cls.user = User.objects.create()
permission = Permission.objects.get(codename='search')
cls.user.user_permissions.add(permission)
org_group_type = GroupType.objects.create(codename='organization')
cls.group1 = Group.objects.create(name='group1', group_type=org_group_type)
cls.group1.add_member(cls.user.essauth_member)
cls.group2 = Group.objects.create(name='group2', group_type=org_group_type)
cls.group2.add_member(cls.user.essauth_member)
cls.component_type = TagVersionType.objects.create(name='component', archive_type=False)
cls.archive_type = TagVersionType.objects.create(name='archive', archive_type=True)
def setUp(self):
super().setUp()
self.client.force_authenticate(user=self.user)
@staticmethod
def create_agent():
return Agent.objects.create(
type=AgentType.objects.create(main_type=MainAgentType.objects.create()),
ref_code=RefCode.objects.create(
country=Country.objects.get(iso='SE'),
repository_code='repo',
),
level_of_detail=0,
record_status=0,
script=0,
language=Language.objects.get(iso_639_1='sv'),
create_date=timezone.now(),
)
def test_search_component(self):
component_tag = Tag.objects.create()
component_tag_version = TagVersion.objects.create(
tag=component_tag,
type=self.component_type,
elastic_index="component",
)
Component.from_obj(component_tag_version).save(refresh='true')
with self.subTest('without archive'):
res = self.client.get(self.url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data['hits']), 1)
structure_type = StructureType.objects.create()
structure_template = Structure.objects.create(type=structure_type, is_template=True)
archive_tag = Tag.objects.create()
archive_tag_version = TagVersion.objects.create(
tag=archive_tag,
type=self.archive_type,
elastic_index="archive",
)
self.group1.add_object(archive_tag_version)
structure, archive_tag_structure = structure_template.create_template_instance(archive_tag)
Archive.from_obj(archive_tag_version).save(refresh='true')
TagStructure.objects.create(tag=component_tag, parent=archive_tag_structure, structure=structure)
Component.index_documents(remove_stale=True)
with self.subTest('with archive'):
res = self.client.get(self.url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data['hits']), 1)
self.assertEqual(res.data['hits'][0]['_id'], str(component_tag_version.pk))
with self.subTest('with archive, non-active organization'):
self.user.user_profile.current_organization = self.group2
self.user.user_profile.save()
res = self.client.get(self.url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data['hits']), 0)
def test_filter_on_component_agent(self):
agent = self.create_agent()
component_tag = Tag.objects.create()
component_tag_version = TagVersion.objects.create(
tag=component_tag,
type=self.component_type,
elastic_index="component",
)
structure_type = StructureType.objects.create()
structure_template = Structure.objects.create(type=structure_type, is_template=True)
archive_tag = Tag.objects.create()
archive_tag_version = TagVersion.objects.create(
tag=archive_tag,
type=self.archive_type,
elastic_index="archive",
)
structure, archive_tag_structure = structure_template.create_template_instance(archive_tag)
Archive.from_obj(archive_tag_version).save(refresh='true')
TagStructure.objects.create(tag=component_tag, parent=archive_tag_structure, structure=structure)
AgentTagLink.objects.create(
agent=agent,
tag=componen |
harry-7/addons-server | src/olympia/devhub/forms.py | Python | bsd-3-clause | 33,216 | 0 | # -*- coding: utf-8 -*-
import os
from django import forms
from django.conf import settings
from django.db.models import Q
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
import jinja2
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLog
from olympia.activity.utils import log_and_notify
from olympia.addons.forms import AddonFormBase
from olympia.addons.models import (
Addon, AddonCategory, AddonDependency, AddonReviewerFlags, AddonUser,
Preview)
from olympia.amo.fields import HttpHttpsOnlyURLField
from olympia.amo.forms import AMOModelForm
from olympia.amo.templatetags.jinja_helpers import mark_safe_lazy
from olympia.amo.urlresolvers import reverse
from olympia.applications.models import AppVersion
from olympia.constants.categories import CATEGORIES
from olympia.files.models import File, FileUpload
from olympia.files.utils import parse_addon
from olympia.lib import happyforms
from olympia.translations.fields import TransField, TransTextarea
from olympia.translations.forms import TranslationFormMixin
from olympia.translations.models import Translation, delete_translation
from olympia.translations.widgets import (
TranslationTextarea, TranslationTextInput)
from olympia.versions.models import (
VALID_SOURCE_EXTENSIONS, ApplicationsVersions, License, Version)
from . import tasks
class AuthorForm(happyforms.ModelForm):
class Meta:
model = AddonUser
exclude = ('addon',)
class BaseModelFormSet(BaseModelFormSet):
"""
Override the parent's is_valid to prevent deleting all forms.
"""
def is_valid(self):
# clean() won't get called in is_valid() if all the rows are getting
# deleted. We can't allow deleting everything.
rv = super(BaseModelFormSet, self).is_valid()
return rv and not any(self.errors) and not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseM | odelFormSet):
def clean(self):
if any(self.errors):
return
# cleaned_data could be Non | e if it's the empty extra form.
data = filter(None, [f.cleaned_data for f in self.forms
if not f.cleaned_data.get('DELETE', False)])
if not any(d['role'] == amo.AUTHOR_ROLE_OWNER for d in data):
raise forms.ValidationError(
ugettext('Must have at least one owner.'))
if not any(d['listed'] for d in data):
raise forms.ValidationError(
ugettext('At least one author must be listed.'))
users = [d['user'] for d in data]
if sorted(users) != sorted(set(users)):
raise forms.ValidationError(
ugettext('An author can only be listed once.'))
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
slug = forms.CharField()
reason = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
self.addon = kwargs.pop('addon')
super(DeleteForm, self).__init__(*args, **kwargs)
def clean_slug(self):
data = self.cleaned_data
if not data['slug'] == self.addon.slug:
raise forms.ValidationError(ugettext('Slug incorrect.'))
class LicenseRadioChoiceInput(forms.widgets.RadioChoiceInput):
def __init__(self, name, value, attrs, choice, index):
super(LicenseRadioChoiceInput, self).__init__(
name, value, attrs, choice, index)
license = choice[1] # Choice is a tuple (object.id, object).
link = (u'<a class="xx extra" href="%s" target="_blank" '
u'rel="noopener noreferrer">%s</a>')
if hasattr(license, 'url') and license.url:
details = link % (license.url, ugettext('Details'))
self.choice_label = mark_safe(self.choice_label + ' ' + details)
if hasattr(license, 'icons'):
self.attrs['data-cc'] = license.icons
self.attrs['data-name'] = unicode(license)
class LicenseRadioFieldRenderer(forms.widgets.RadioFieldRenderer):
choice_input_class = LicenseRadioChoiceInput
class LicenseRadioSelect(forms.RadioSelect):
renderer = LicenseRadioFieldRenderer
class LicenseForm(AMOModelForm):
builtin = forms.TypedChoiceField(
choices=[], coerce=int,
widget=LicenseRadioSelect(attrs={'class': 'license'}))
name = forms.CharField(widget=TranslationTextInput(),
label=_(u'What is your license\'s name?'),
required=False, initial=_('Custom License'))
text = forms.CharField(widget=TranslationTextarea(), required=False,
label=_(u'Provide the text of your license.'))
def __init__(self, *args, **kwargs):
self.version = kwargs.pop('version', None)
if self.version:
kwargs['instance'], kwargs['initial'] = self.version.license, None
# Clear out initial data if it's a builtin license.
if getattr(kwargs['instance'], 'builtin', None):
kwargs['initial'] = {'builtin': kwargs['instance'].builtin}
kwargs['instance'] = None
self.cc_licenses = kwargs.pop(
'cc', self.version.addon.type == amo.ADDON_STATICTHEME)
else:
self.cc_licenses = kwargs.pop(
'cc', False)
super(LicenseForm, self).__init__(*args, **kwargs)
licenses = License.objects.builtins(
cc=self.cc_licenses).filter(on_form=True)
cs = [(x.builtin, x) for x in licenses]
if not self.cc_licenses:
# creative commons licenses don't have an 'other' option.
cs.append((License.OTHER, ugettext('Other')))
self.fields['builtin'].choices = cs
if (self.version and
self.version.channel == amo.RELEASE_CHANNEL_UNLISTED):
self.fields['builtin'].required = False
class Meta:
model = License
fields = ('builtin', 'name', 'text')
def clean_name(self):
name = self.cleaned_data['name']
return name.strip() or ugettext('Custom License')
def clean(self):
data = self.cleaned_data
if self.errors:
return data
elif data['builtin'] == License.OTHER and not data['text']:
raise forms.ValidationError(
ugettext('License text is required when choosing Other.'))
return data
def get_context(self):
"""Returns a view context dict having keys license_form,
and license_other_val.
"""
return {
'version': self.version,
'license_form': self.version and self,
'license_other_val': License.OTHER
}
def save(self, *args, **kw):
"""Save all form data.
This will only create a new license if it's not one of the builtin
ones.
Keyword arguments
**log=True**
Set to False if you do not want to log this action for display
on the developer dashboard.
"""
log = kw.pop('log', True)
changed = self.changed_data
builtin = self.cleaned_data['builtin']
if builtin == '': # No license chosen, it must be an unlisted add-on.
return
is_other = builtin == License.OTHER
if not is_other:
# We're dealing with a builtin license, there is no modifications
# allowed to it, just return it.
license = License.objects.get(builtin=builtin)
else:
# We're not dealing with a builtin license, so save it to the
# database.
license = super(LicenseForm, self).save(*args, **kw)
if self.version:
if (changed and is_other) or license != self.version.license:
self.version.update(license=license)
if log:
ActivityLo |
michaelBenin/sqlalchemy | lib/sqlalchemy/dialects/postgresql/__init__.py | Python | mit | 1,233 | 0.004866 | # postgresql/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, psycopg2, pg8000, py | postgresql, zxjdbc
base.dialect = psycopg2.dialect
from .base import \
INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
TSV | ECTOR
from .constraints import ExcludeConstraint
from .hstore import HSTORE, hstore
from .json import JSON, JSONElement
from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
TSTZRANGE
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONElement'
)
|
mfherbst/spack | var/spack/repos/builtin/packages/r-xde/package.py | Python | lgpl-2.1 | 1,935 | 0.000517 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at | the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please als | o see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RXde(RPackage):
"""Multi-level model for cross-study detection of differential gene
expression."""
homepage = "https://www.bioconductor.org/packages/XDE/"
git = "https://git.bioconductor.org/packages/XDE.git"
version('2.22.0', commit='25bcec965ae42a410dd285a9db9be46d112d8e81')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-gtools', type=('build', 'run'))
depends_on('r-mergemaid', type=('build', 'run'))
depends_on('r-mvtnorm', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@2.22.0')
|
pwillworth/galaxyharvester | catchupMail.py | Python | gpl-3.0 | 5,380 | 0.021004 | #!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pymysql
import dbInfo
import optparse
import smtplib
from email.message import EmailMessage
from smtplib import SMTPRecipientsRefused
import time
from datetime import timedelta, datetime
import mailInfo
emailIDs = ['spawns', 'activity']
def ghConn():
conn = pymysql.connect(host = dbInfo.DB_HOST,
db = dbInfo.DB_NAME,
user = dbInfo.DB_USER,
passwd = dbInfo.DB_PASS)
conn.autocommit(True)
return conn
def sendAlertMail(conn, userID, msgText, link, alertID, alertTitle, emailIndex):
# Don't try to send mail if we exceeded quota within last hour
lastFailureTime = datetime(2000, 1, 1, 12)
currentTime = datetime.fromtimestamp(time.time())
timeSinceFailure = currentTime - lastFailureTime
try:
f = open("last_notification_failure_" + emailIDs[emailIndex] + ".txt")
lastFailureTime = datetime.strptime(f.read().strip(), "%Y-%m-%d %H:%M:%S")
f.close()
timeSinceFailure = currentTime - lastFailureTime
except IOError as e:
sys.stdout.write("No last failure time\n")
if timeSinceFailure.days < 1 and timeSinceFailure.seconds < 3660:
sys.stderr.write(str(timeSinceFailure.seconds) + " less than 3660 no mail.\n")
return 1
# look up the user email
cursor = conn.cursor()
cursor.execute("SELECT emailAddress FROM tUsers WHERE userID='" + userID + "';")
row = cursor.fetchone()
if row == None:
result = "bad username"
else:
email = row[0]
if (email.find("@") > -1 and email.find(".") > -1):
# send message
message = EmailMessage()
message['From'] = "\"Galaxy Harvester Alerts\" <" + emailIDs[emailIndex] + "@galaxyharvester.net>"
message['To'] = email
message['Subject'] = "".join(("Galaxy Harvester ", alertTitle))
message.set_content("".join(("Hello ", userID, ",\n\n", msgText, "\n\n", link, "\n\n You can manage your alerts at http://galaxyharvester.net/myAlerts.py\n")))
message.add_alternative("".join(("<div><img src='http://galaxyharvester.net/images/ghLogoLarge.png'/></div><p>Hello ", userID, ",</p><br/><p>", msgText.replace("\n", "<br/>"), "</p><p><a style='text-decoration:none;' href='", link, "'><div style='width:170px;font-size:18px;font-weight:600;color:#feffa1;background-color:#003344;padding:8px;margin:4px;border:1px solid black;'>View in Galaxy Harvester</div></a><br/>or copy and paste link: ", link, "</p><br/><p>You can manage your alerts at <a href='http://galaxyharvester.net/myAlerts.py'>http://galaxyharvester.net/myAlerts.py</a></p><p>-Galaxy Harvester Administrator</p>")), subtype='html')
mailer = smtplib.SMTP(mailInfo.MAIL_HOST)
mailer.login(emailIDs[emailIndex] + "@galaxyharvester.net", mailInfo.MAIL_PASS)
| try:
mailer.send_message(message)
result = 'email sent'
except SMTPRecipientsRefused as e:
result = 'email failed'
sys.stderr.write('Email failed - ' + str(e))
trackEmailFailure(datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S"), emailIndex)
mailer.quit()
# update alert status
if ( result == 'email sent' ):
cursor.execute('UPDATE tAlerts SET alertStatus=1, statusChanged=N | OW() WHERE alertID=' + str(alertID) + ';')
else:
result = 'Invalid email.'
cursor.close()
def main():
emailIndex = 0
# check for command line argument for email to use
if len(sys.argv) > 1:
emailIndex = int(sys.argv[1])
conn = ghConn()
# try sending any backed up alert mails
retryPendingMail(conn, emailIndex)
def trackEmailFailure(failureTime, emailIndex):
# Update tracking file
try:
f = open("last_notification_failure_" + emailIDs[emailIndex] + ".txt", "w")
f.write(failureTime)
f.close()
except IOError as e:
sys.stderr.write("Could not write email failure tracking file")
def retryPendingMail(conn, emailIndex):
# open email alerts that have not been sucessfully sent less than 48 hours old
minTime = datetime.fromtimestamp(time.time()) - timedelta(days=4)
cursor = conn.cursor()
cursor.execute("SELECT userID, alertTime, alertMessage, alertLink, alertID FROM tAlerts WHERE alertType=2 AND alertStatus=0 and alertTime > '" + minTime.strftime("%Y-%m-%d %H:%M:%S") + "' and alertMessage LIKE '% - %';")
row = cursor.fetchone()
# try to send as long as not exceeding quota
while row != None:
fullText = row[2]
splitPos = fullText.find(" - ")
alertTitle = fullText[:splitPos]
alertBody = fullText[splitPos+3:]
result = sendAlertMail(conn, row[0], alertBody, row[3], row[4], alertTitle, emailIndex)
if result == 1:
sys.stderr.write("Delayed retrying rest of mail since quota reached.\n")
break
row = cursor.fetchone()
cursor.close()
if __name__ == "__main__":
main()
|
voetsjoeba/pyjks | tests/expected/unicode_passwords.py | Python | mit | 5,713 | 0.008927 | public_key = b"\x30\x81\x9f\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00\x30\x81\x89\x02\x81\x81\x00\xaf\x15\xe8" + \
b"\x75\x00\x06\xe4\xc5\xd5\xda\x2c\xc5\x63\x6a\xef\xa3\x06\x81\x99\x19\x8f\x2a\xb5\xd3\x2e\x50\x76\x94\xe1\xc1\x5a\xa2\x84\xd7\xad" + \
| b"\x91\x2b\xbf\x42\xe6\xb1\x08\x2f\x15\x53\x80\xc1\xa7\xa9\xaf\x22\xd7\x81\x95\xc4\x1e\xea\x4b\x60\x60\xf8\x00\xe5\x9e\x9d\x8a\xe1" + \
b"\x4f\x37\x41\xe7\x4d\xc6\x2e\x9c\xbb\x5c\x03\x5e\x60\x04\x9b\x8b\x3f\x8c\x27\xfc\x1c\x9c\x82\xec\xec\xa1\x30\x0e\x42\x9c\xd3\xaa" + | \
b"\x91\x8a\xf4\xcf\x0c\x60\x9b\xb3\xb4\x77\x14\x24\xe3\x22\xcb\xb8\x79\xa6\x3c\x20\xe3\x8d\x09\x28\x34\xda\x78\xfe\x8d\x02\x03\x01" + \
b"\x00\x01"
private_key = b"\x30\x82\x02\x78\x02\x01\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x62\x30\x82\x02\x5e\x02\x01" + \
b"\x00\x02\x81\x81\x00\xaf\x15\xe8\x75\x00\x06\xe4\xc5\xd5\xda\x2c\xc5\x63\x6a\xef\xa3\x06\x81\x99\x19\x8f\x2a\xb5\xd3\x2e\x50\x76" + \
b"\x94\xe1\xc1\x5a\xa2\x84\xd7\xad\x91\x2b\xbf\x42\xe6\xb1\x08\x2f\x15\x53\x80\xc1\xa7\xa9\xaf\x22\xd7\x81\x95\xc4\x1e\xea\x4b\x60" + \
b"\x60\xf8\x00\xe5\x9e\x9d\x8a\xe1\x4f\x37\x41\xe7\x4d\xc6\x2e\x9c\xbb\x5c\x03\x5e\x60\x04\x9b\x8b\x3f\x8c\x27\xfc\x1c\x9c\x82\xec" + \
b"\xec\xa1\x30\x0e\x42\x9c\xd3\xaa\x91\x8a\xf4\xcf\x0c\x60\x9b\xb3\xb4\x77\x14\x24\xe3\x22\xcb\xb8\x79\xa6\x3c\x20\xe3\x8d\x09\x28" + \
b"\x34\xda\x78\xfe\x8d\x02\x03\x01\x00\x01\x02\x81\x80\x36\xeb\x4b\x50\x2f\xe2\xf9\xad\xa8\xa7\xd7\xf5\x4e\x7b\x03\x92\x02\x7f\x72" + \
b"\x53\x97\x19\xd1\x90\xdd\x6d\x35\xd4\xfb\x7f\x57\xfb\xb4\x69\xa6\xb2\xeb\xa3\x01\xcc\x34\xe9\x99\x43\x3a\x3f\x1f\xff\x84\x75\x40" + \
b"\x1b\x93\x35\x34\x20\x72\x63\x94\x66\xb6\x44\x29\xc1\xf1\xdd\xd4\x65\x3a\x30\xa8\x05\xe4\x53\x54\x44\x78\x8f\x92\x0a\x43\x4d\x82" + \
b"\x51\x54\xdc\x41\xc9\x87\xa5\x98\xc8\x80\xf3\x1b\x91\xc3\x2f\x3d\x32\xf5\xec\x86\x4c\xa7\x4d\xde\x61\x12\xf7\xaf\xd7\x16\x66\xb0" + \
b"\x1f\xf4\xd1\x53\xf6\x8a\x4f\x44\xce\xcd\x22\x85\x81\x02\x41\x00\xf5\xff\xd3\x2a\x39\x3b\x0f\xaf\x64\xc8\x04\x60\x87\xa3\x1b\xe5" + \
b"\x34\x99\x69\x2d\xa8\x2f\x6c\x17\xef\x4e\xc1\xb8\x75\x61\x88\x60\x5e\xbe\x3c\x16\xa0\x87\xda\x3e\x9d\x43\x56\x67\x84\x8d\xbe\x86" + \
b"\x70\x91\x8d\x8f\xf9\x3a\xa4\x0e\x59\xa0\x42\x24\x69\xaa\xb0\x6d\x02\x41\x00\xb6\x34\x11\x08\x6e\x0b\x65\xc0\x9f\x53\xe2\x11\x52" + \
b"\x13\xf0\x6a\x55\x2c\x4a\xb4\xe3\x50\xe8\xe9\x66\x11\xd4\x7a\x85\xbf\x98\xfd\x11\xd9\x9a\x09\xf3\x8f\x69\x53\x70\xbf\xca\xc3\xc5" + \
b"\x72\x8d\x47\xf5\x6a\x37\xc8\x07\x2c\xfd\x43\x9b\x78\x8d\x05\xeb\x1d\xf2\xa1\x02\x41\x00\xac\x7e\x20\x11\xa1\x63\xba\x91\xdf\xf7" + \
b"\x28\xaa\x8f\x31\x5e\x24\x10\x07\xea\x6a\x6b\x5e\x25\x4b\x7b\x30\x1c\x42\x3d\x7c\x90\x66\x12\xc9\x0d\xd5\x47\xe7\x3a\xaf\x61\x12" + \
b"\x90\x89\xb1\xb6\xba\x7c\x06\x7e\xe9\x66\xa4\xf9\xeb\x83\x6c\x71\x25\x2f\xe7\x30\x1a\xd9\x02\x41\x00\x89\x60\xfc\xae\xc4\x7a\x67" + \
b"\x80\x33\x21\xc6\x44\x95\x04\x5f\xb3\x6d\x00\xf6\x5b\x29\x42\x2a\x3b\x41\x30\x94\x6a\xc5\x49\xcf\x8a\x90\xd8\xe7\x62\x35\x78\x9e" + \
b"\x4b\xc1\xa9\x7a\xb2\xdd\xbf\x1f\x73\x70\x41\x64\x49\xb7\xcf\x5e\x2e\x89\x9c\xfd\x87\xc6\xdd\x4f\xc1\x02\x41\x00\xec\xa2\x72\x69" + \
b"\x0b\xd0\x77\x0e\xdc\x8e\x6e\x18\x07\x50\xcb\x22\x37\x95\x87\x38\x6c\xd7\xa7\x2a\xb9\x8e\x83\x66\xb1\x79\x05\x73\xf8\xbc\x50\x57" + \
b"\xd1\x2a\x19\xe5\x49\x85\x5f\xdf\x28\xe0\x96\x9e\xf3\x9d\x70\x6b\x1f\xf8\x60\xb8\xc8\x56\x04\xb1\xfc\x0c\x2c\xcc"
certs = [b"\x30\x82\x01\xac\x30\x82\x01\x15\xa0\x03\x02\x01\x02\x02\x01\x00\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x30" + \
b"\x1c\x31\x1a\x30\x18\x06\x03\x55\x04\x03\x0c\x11\x75\x6e\x69\x63\x6f\x64\x65\x5f\x70\x61\x73\x73\x77\x6f\x72\x64\x73\x30\x1e\x17" + \
b"\x0d\x31\x37\x30\x38\x31\x31\x31\x39\x31\x30\x35\x31\x5a\x17\x0d\x31\x39\x30\x38\x31\x31\x31\x39\x31\x30\x35\x31\x5a\x30\x1c\x31" + \
b"\x1a\x30\x18\x06\x03\x55\x04\x03\x0c\x11\x75\x6e\x69\x63\x6f\x64\x65\x5f\x70\x61\x73\x73\x77\x6f\x72\x64\x73\x30\x81\x9f\x30\x0d" + \
b"\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x03\x81\x8d\x00\x30\x81\x89\x02\x81\x81\x00\xaf\x15\xe8\x75\x00\x06\xe4\xc5" + \
b"\xd5\xda\x2c\xc5\x63\x6a\xef\xa3\x06\x81\x99\x19\x8f\x2a\xb5\xd3\x2e\x50\x76\x94\xe1\xc1\x5a\xa2\x84\xd7\xad\x91\x2b\xbf\x42\xe6" + \
b"\xb1\x08\x2f\x15\x53\x80\xc1\xa7\xa9\xaf\x22\xd7\x81\x95\xc4\x1e\xea\x4b\x60\x60\xf8\x00\xe5\x9e\x9d\x8a\xe1\x4f\x37\x41\xe7\x4d" + \
b"\xc6\x2e\x9c\xbb\x5c\x03\x5e\x60\x04\x9b\x8b\x3f\x8c\x27\xfc\x1c\x9c\x82\xec\xec\xa1\x30\x0e\x42\x9c\xd3\xaa\x91\x8a\xf4\xcf\x0c" + \
b"\x60\x9b\xb3\xb4\x77\x14\x24\xe3\x22\xcb\xb8\x79\xa6\x3c\x20\xe3\x8d\x09\x28\x34\xda\x78\xfe\x8d\x02\x03\x01\x00\x01\x30\x0d\x06" + \
b"\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b\x05\x00\x03\x81\x81\x00\x42\x54\xd2\x1b\xcb\xdf\x28\x2d\xa0\x5c\x7f\x8e\x82\x90\xae\x79" + \
b"\x3c\x37\x9a\x57\x10\xcb\x43\x09\xb1\x09\xf1\x3e\xa9\x58\x0c\x4c\x16\x9a\xf8\xd2\xa8\x35\x70\xb4\x0c\x9b\xb0\xd3\xef\xce\x54\xbf" + \
b"\x0e\xf0\x19\xf5\x7e\x66\x07\xcb\xcb\x48\x6d\x92\x75\xca\x5c\x54\xa6\x8f\xa8\x47\x8a\x82\x6d\x38\xec\x07\xda\x52\x91\x28\x9b\x5d" + \
b"\x0d\x07\xda\xc3\x22\xd2\x13\x0e\x70\x1e\xc6\xd4\xda\x63\xb3\x3d\xc1\xd3\xfa\xa0\xb5\x1b\x5e\x08\xc5\xfa\x53\x03\x9d\xab\x87\xdc" + \
b"\x63\x19\xf0\x7a\x9e\x93\xfd\xbc\xdc\xbe\x44\x5c\xa5\x82\x73\x0e"]
|
Macpotty/logRobocon | logRobocon/apps/blog/migrations/0005_auto_20160522_1754.py | Python | gpl-3.0 | 496 | 0 | # -*- co | ding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-22 09:54
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
| ('blog', '0004_blogpost_loggeneral'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='logContent',
field=ckeditor.fields.RichTextField(verbose_name='日志正文'),
),
]
|
WhiteMagic/JoystickGremlin | gremlin/ui/dialogs.py | Python | gpl-3.0 | 45,199 | 0.000509 | # -*- coding: utf-8; -*-
# Copyright (C) 2015 - 2019 Lionel Ott
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import sys
import winreg
from PyQt5 import QtCore, QtGui, QtWidgets
import dill
import gremlin
from . import common, ui_about
class OptionsUi(common.BaseDialogUi):
"""UI allowing the configuration of a vari | ety of options."""
def __init__(self, parent=None):
"""Creates a new options UI instance.
:param parent the parent of this widget
"""
super().__init__(parent)
# Actual configuration object being managed
self.config = gremlin.config.Configuration()
self.setMinimumWidth(400)
self.setWindowTitle("Options")
self.main_layout = QtWidgets.QVBoxLayout(self)
self.tab_container = QtWidgets.QTabWidget() |
self.main_layout.addWidget(self.tab_container)
self._create_general_page()
self._create_profile_page()
self._create_hidguardian_page()
def _create_general_page(self):
"""Creates the general options page."""
self.general_page = QtWidgets.QWidget()
self.general_layout = QtWidgets.QVBoxLayout(self.general_page)
# Highlight input option
self.highlight_input = QtWidgets.QCheckBox(
"Highlight currently used input"
)
self.highlight_input.clicked.connect(self._highlight_input)
self.highlight_input.setChecked(self.config.highlight_input)
# Switch to highlighted device
self.highlight_device = QtWidgets.QCheckBox(
"Highlight swaps device tabs"
)
self.highlight_device.clicked.connect(self._highlight_device)
self.highlight_device.setChecked(self.config.highlight_device)
# Close to system tray option
self.close_to_systray = QtWidgets.QCheckBox(
"Closing minimizes to system tray"
)
self.close_to_systray.clicked.connect(self._close_to_systray)
self.close_to_systray.setChecked(self.config.close_to_tray)
# Activate profile on launch
self.activate_on_launch = QtWidgets.QCheckBox(
"Activate profile on launch"
)
self.activate_on_launch.clicked.connect(self._activate_on_launch)
self.activate_on_launch.setChecked(self.config.activate_on_launch)
# Start minimized option
self.start_minimized = QtWidgets.QCheckBox(
"Start Joystick Gremlin minimized"
)
self.start_minimized.clicked.connect(self._start_minimized)
self.start_minimized.setChecked(self.config.start_minimized)
# Start on user login
self.start_with_windows = QtWidgets.QCheckBox(
"Start Joystick Gremlin with Windows"
)
self.start_with_windows.clicked.connect(self._start_windows)
self.start_with_windows.setChecked(self._start_windows_enabled())
# Show message on mode change
self.show_mode_change_message = QtWidgets.QCheckBox(
"Show message when changing mode"
)
self.show_mode_change_message.clicked.connect(
self._show_mode_change_message
)
self.show_mode_change_message.setChecked(
self.config.mode_change_message
)
# Default action selection
self.default_action_layout = QtWidgets.QHBoxLayout()
self.default_action_label = QtWidgets.QLabel("Default action")
self.default_action_dropdown = QtWidgets.QComboBox()
self.default_action_layout.addWidget(self.default_action_label)
self.default_action_layout.addWidget(self.default_action_dropdown)
self._init_action_dropdown()
self.default_action_layout.addStretch()
# Macro axis polling rate
self.macro_axis_polling_layout = QtWidgets.QHBoxLayout()
self.macro_axis_polling_label = \
QtWidgets.QLabel("Macro axis polling rate")
self.macro_axis_polling_value = common.DynamicDoubleSpinBox()
self.macro_axis_polling_value.setRange(0.001, 1.0)
self.macro_axis_polling_value.setSingleStep(0.05)
self.macro_axis_polling_value.setDecimals(3)
self.macro_axis_polling_value.setValue(
self.config.macro_axis_polling_rate
)
self.macro_axis_polling_value.valueChanged.connect(
self._macro_axis_polling_rate
)
self.macro_axis_polling_layout.addWidget(self.macro_axis_polling_label)
self.macro_axis_polling_layout.addWidget(self.macro_axis_polling_value)
self.macro_axis_polling_layout.addStretch()
# Macro axis minimum change value
self.macro_axis_minimum_change_layout = QtWidgets.QHBoxLayout()
self.macro_axis_minimum_change_label = \
QtWidgets.QLabel("Macro axis minimum change value")
self.macro_axis_minimum_change_value = common.DynamicDoubleSpinBox()
self.macro_axis_minimum_change_value.setRange(0.00001, 1.0)
self.macro_axis_minimum_change_value.setSingleStep(0.01)
self.macro_axis_minimum_change_value.setDecimals(5)
self.macro_axis_minimum_change_value.setValue(
self.config.macro_axis_minimum_change_rate
)
self.macro_axis_minimum_change_value.valueChanged.connect(
self._macro_axis_minimum_change_value
)
self.macro_axis_minimum_change_layout.addWidget(
self.macro_axis_minimum_change_label
)
self.macro_axis_minimum_change_layout.addWidget(
self.macro_axis_minimum_change_value
)
self.macro_axis_minimum_change_layout.addStretch()
self.general_layout.addWidget(self.highlight_input)
self.general_layout.addWidget(self.highlight_device)
self.general_layout.addWidget(self.close_to_systray)
self.general_layout.addWidget(self.activate_on_launch)
self.general_layout.addWidget(self.start_minimized)
self.general_layout.addWidget(self.start_with_windows)
self.general_layout.addWidget(self.show_mode_change_message)
self.general_layout.addLayout(self.default_action_layout)
self.general_layout.addLayout(self.macro_axis_polling_layout)
self.general_layout.addLayout(self.macro_axis_minimum_change_layout)
self.general_layout.addStretch()
self.tab_container.addTab(self.general_page, "General")
def _create_profile_page(self):
"""Creates the profile options page."""
self.profile_page = QtWidgets.QWidget()
self.profile_page_layout = QtWidgets.QVBoxLayout(self.profile_page)
# Autoload profile option
self.autoload_checkbox = QtWidgets.QCheckBox(
"Automatically load profile based on current application"
)
self.autoload_checkbox.clicked.connect(self._autoload_profiles)
self.autoload_checkbox.setChecked(self.config.autoload_profiles)
self.keep_last_autoload_checkbox = QtWidgets.QCheckBox(
"Keep profile active on focus loss"
)
self.keep_last_autoload_checkbox.setToolTip("""If this option is off, profiles that have been configured to load automatically when an application gains focus
will deactivate when that application loses focus.
If this option is on, the last active profile will remain active until a different profile is loaded.""")
self.keep_last_autoload_checkbox.clicked.connect(self._keep_last_autoload)
self.keep_last_autoload_checkbox.setChecked(self.config.keep_last_autoload)
self.keep_last_autoload_check |
haard/quarterapp | quarterapp/account.py | Python | mit | 6,524 | 0.011649 | #
# Copyright (c) 2013 Markus Eliasson, http://www.quarterapp.com/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import sys
import os
import tornado.web
import tornado.escape
from tornado.options import options
from basehandlers import *
from storage import *
from email_utils import *
from quarter_errors import *
from quarter_utils import *
class LogoutHandler(BaseHandler):
    # Logs the user out: dropping the "user" cookie is the whole session teardown.
    def get(self):
        self.clear_cookie("user")
        # Back to the public start page.
        self.redirect(u"/")
class SignupHandler(BaseHandler):
def get(self):
if self.enabled("allow-signups"):
self.render(u"public/signup.html", error = None, username = "")
else:
raise tornado.web.HTTPError(404)
def post(self):
if not self.enabled("allow-signups"):
raise tornado.web.HTTPError(500) |
username = self.get_argument("email", "")
error = False
if len(username) == 0:
error = "empty"
if not username_unique(self.application.db, username):
error = "not_unique"
if not error:
try:
code = os.urandom(16).encode("base64")[:20]
if | send_signup_email(username, code):
signup_user(self.application.db, username, code, self.request.remote_ip)
self.render(u"public/signup_instructions.html")
else:
self.render(u"public/signup.html", error = error, username = username)
except Exception, e:
logging.error("Could not signup user: %s" % (e,))
self.render(u"public/signup.html", error = error, username = username)
else:
self.render(u"public/signup.html", error = error, username = username)
class ActivationHandler(BaseHandler):
    """Activates a signed-up account from its emailed activation code.

    GET renders the activation form (optionally pre-filled with the code from
    the URL); POST validates the code and password pair and activates the user.
    """

    def get(self, code_parameter = None):
        code = None
        if code_parameter:
            code = code_parameter
        if self.enabled("allow-activations"):
            self.render(u"public/activate.html", error = None, code = code)
        else:
            raise tornado.web.HTTPError(404)

    def post(self):
        if not self.enabled("allow-activations"):
            raise tornado.web.HTTPError(500)
        code = self.get_argument("code", "")
        password = self.get_argument("password", "")
        verify_password = self.get_argument("verify-password", "")
        error = None
        if len(code) == 0:
            error = "not_valid"
        if not password == verify_password:
            error = "not_matching"
        if error:
            # Bug fix: report the actual validation error (was hardcoded to
            # "not_valid") and keep the submitted code so the user can retry.
            self.render(u"public/activate.html", error = error, code = code)
        else:
            salted_password = hash_password(password, options.salt)
            if activate_user(self.application.db, code, salted_password):
                # TODO Do login
                self.redirect(u"/sheet")
            else:
                self.render(u"public/activate.html", error = "unknown", code = code)
class ForgotPasswordHandler(BaseHandler):
    """Emails a password-reset code to an existing account.

    Fix: removed the dead local ``error = False`` which was never read.
    """

    def get(self):
        self.render(u"public/forgot.html", error = None, username = None)

    def post(self):
        username = self.get_argument("username", "")
        if len(username) == 0:
            self.render(u"public/forgot.html", error = "empty", username = username)
        else:
            # 20-character random reset code (Python 2 base64 codec).
            reset_code = os.urandom(16).encode("base64")[:20]
            if set_user_reset_code(self.application.db, username, reset_code):
                send_reset_email(username, reset_code)
                self.redirect(u"/reset")
            else:
                # Unknown username (or storage failure) — show a generic error.
                self.render(u"public/forgot.html", error = "unknown", username = username)
class ResetPasswordHandler(BaseHandler):
    """Lets a user set a new password using an emailed reset code."""

    def get(self, code_parameter = None):
        code = None
        if code_parameter:
            code = code_parameter
        self.render(u"public/reset.html", error = None, code = code)

    def post(self):
        code = self.get_argument("code", "")
        password = self.get_argument("password", "")
        verify_password = self.get_argument("verify-password", "")
        error = None
        if len(code) == 0:
            error = "not_valid"
        if not password == verify_password:
            error = "not_matching"
        if error:
            # Bug fix: surface the computed validation error ("not_valid" /
            # "not_matching") instead of always rendering "unknown".
            self.render(u"public/reset.html", error = error, code = code)
        else:
            salted_password = hash_password(password, options.salt)
            if reset_password(self.application.db, code, salted_password):
                # TODO Do login
                self.redirect(u"/sheet")
            else:
                self.render(u"public/reset.html", error = "unknown", code = code)
class LoginHandler(AuthenticatedHandler):
    """Authenticates a user from the login form.

    Fixes: use ``logging.warning`` (``warn`` is a deprecated alias) and pass
    ``allow_signups`` to the failure render — the GET handler supplies it, so
    the template presumably expects it (TODO confirm against login.html).
    """

    def get(self):
        allow_signups = self.application.quarter_settings.get_value("allow-signups")
        self.render(u"public/login.html", allow_signups = allow_signups)

    def post(self):
        username = self.get_argument("username", "")
        password = self.get_argument("password", "")
        hashed_password = hash_password(password, options.salt)
        user = authenticate_user(self.application.db, username, hashed_password)
        if user:
            logging.warning("User authenticated")
            self.set_current_user(user)
            self.redirect(u"/sheet")
        else:
            logging.warning("User not authenticated")
            self.set_current_user(None)
            # Re-render with the same context variables the GET handler uses.
            allow_signups = self.application.quarter_settings.get_value("allow-signups")
            self.render(u"public/login.html", allow_signups = allow_signups)
|
hassaanm/stock-trading | src/pybrain/rl/learners/meta/meta.py | Python | apache-2.0 | 196 | 0.005102 | __author_ | _ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.learners.learner import Learner
class MetaLearner(Learner):
    """ Learners that make use of other Learners, or learn how to learn. """
    # Marker base class: no behaviour of its own; removed a trailing " |"
    # extraction artifact that corrupted the docstring line.
nkgilley/home-assistant | homeassistant/components/smarthab/light.py | Python | apache-2.0 | 1,765 | 0.001133 | """Support for SmartHab device integration."""
from datetime import timedelta
import logging
import pysmarthab
from requests.exceptions import Timeout
from homeassistant.components.light import LightEntity
from . import DATA_HUB, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the SmartHab lights platform.

    Fetches the device list from the SmartHab hub stored in ``hass.data`` and
    registers one SmartHabLight per pysmarthab Light device.
    Fix: removed stray " | " extraction artifacts ("DATA_ | HUB", "T | rue").
    """
    hub = hass.data[DOMAIN][DATA_HUB]
    devices = hub.get_device_list()
    _LOGGER.debug("Found a total of %s devices", str(len(devices)))
    entities = (
        SmartHabLight(light) for light in devices if isinstance(light, pysmarthab.Light)
    )
    # True: ask Home Assistant to call update() before the first state write.
    add_entities(entities, True)
class SmartHabLight(LightEntity):
    """Home Assistant light entity backed by a pysmarthab Light device."""

    def __init__(self, light):
        """Wrap the given pysmarthab light device."""
        self._light = light

    @property
    def unique_id(self) -> str:
        """Return the device identifier reported by SmartHab."""
        return self._light.device_id

    @property
    def name(self) -> str:
        """Return the user-visible label of the device."""
        return self._light.label

    @property
    def is_on(self) -> bool:
        """Return whether the light is currently on."""
        return self._light.state

    def turn_on(self, **kwargs):
        """Switch the light on via the SmartHab API."""
        self._light.turn_on()

    def turn_off(self, **kwargs):
        """Switch the light off via the SmartHab API."""
        self._light.turn_off()

    def update(self):
        """Refresh local state from the SmartHab API, logging timeouts."""
        try:
            self._light.update()
        except Timeout:
            _LOGGER.error(
                "Reached timeout while updating light %s from API", self.entity_id
            )
|
manqala/erpnext | erpnext/setup/setup_wizard/install_fixtures.py | Python | gpl-3.0 | 14,628 | 0.021192 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
# Default "Lead Source" records installed for the CRM module during setup.
default_lead_sources = ["Existing Customer", "Reference", "Advertisement",
	"Cold Calling", "Exhibition", "Supplier Reference", "Mass Mailing",
	"Customer's Vendor", "Campaign", "Walk In"]
def install(country=None):
records = [
# domains
{ 'doctype': 'Domain', 'domain': _('Distribution')},
{ 'doctype': 'Domain', 'domain': _('Manufacturing')},
{ 'doctype': 'Domain', 'domain': _('Retail')},
{ 'doctype': 'Domain', 'domain': _('Services')},
{ 'doctype': 'Domain', 'domain': _('Education')},
{ 'doctype': 'Domain', 'domain': _('Healthcare')},
# Setup Progress
{'doctype': "Setup Progress", "actions": [
{"action_name": _("Add Company"), "action_doctype": "Company", "min_doc_count": 1, "is_completed": 1,
"domains": '[]' },
{"action_name": _("Set Sales Target"), "action_doctype": "Company", "min_doc_count": 99,
"action_document": frappe.defaults.get_defaults().get("company") or '',
"action_field": "monthly_sales_target", "is_completed": 0,
"domains": '["Manufacturing", "Services", "Retail", "Distribution"]' },
{"action_name": _("Add Customers"), "action_doctype": "Customer", "min_doc_count": 1, "is_completed": 0,
"domains": '["Manufacturing", "Services", "Retail", "Distribution"]' },
{"action_name": _("Add Suppliers"), "action_doctype": "Supplier", "min_doc_count": 1, "is_completed": 0,
"domains": '["Manufacturing", "Services", "Retail", "Distribution"]' },
{"action_name": _("Add Products"), "action_doctype": "Item", "min_doc_count": 1, "is_completed": 0,
"domains": '["Manufacturing", "Services", "Retail", "Distribution"]' },
{"action_name": _("Add Programs"), "action_doctype": "Program", "min_doc_count": 1, "is_completed": 0,
"domains": '["Education"]' },
{"action_name": _("Add Instructors"), "action_doctype": "Instructor", "min_doc_count": 1, "is_completed": 0,
"domains": '["Education"]' },
{"action_name": _("Add Courses"), "action_doctype": "Course", "min_doc_count": 1, "is_completed": 0,
"domains": '["Education"]' },
{"action_name": _("Add Rooms"), "action_doctype": "Room", "min_doc_count": 1, "is_completed": 0,
"domains": '["Education"]' },
{"action_name": _("Add Users"), "action_doctype": "User", "min_doc_count": 4, "is_completed": 0,
"domains": '[]' }
]},
# address template
{'doctype':"Address Template", "country": country},
# item group
{'doctype': 'Item Group', 'item_group_name': _('All Item Groups'),
'is_group': 1, 'parent_item_group': ''},
{'doctype': 'Item Group', 'item_group_name': _('Products'),
'is_group': 0, 'parent_item_group': _('All Item Groups'), "show_in_website": 1 },
{'doctype': 'Item Group', 'item_group_name': _('Raw Material'),
'is_group': 0, 'parent_item_group': _('All Item Groups') },
{'doctype': 'Item Group', 'item_group_name': _('Services'),
'is_group': 0, 'parent_item_group': _('All Item Groups') },
{'doctype': 'Item Group', 'item_group_name': _('Sub Assemblies'),
'is_group': 0, 'parent_item_group': _('All Item Groups') },
{'doctype': 'Item Group', 'item_group_name': _('Consumable'),
'is_group': 0, 'parent_item_group': _('All Item Groups') },
# salary component
{'doctype': 'Salary Component', 'salary_component': _('Income Tax'), 'description': _('Income Tax'), 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': _('Basic'), 'description': _('Basic'), 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': _('Arrear'), 'description': _('Arrear'), 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': _('Leave Encashment'), 'description': _('Leave Encashment'), 'type': 'Earning'},
# expense claim type
{'doctype': 'Expense Claim Type', 'name': _('Calls'), 'expense_type': _('Calls')},
{'doctype': 'Expense Claim Type', 'name': _('Food'), 'expense_type': _('Food')},
{'doctype': 'Expense Claim Type', 'name': _('Medical'), 'expense_type': _('Medical')},
{'doctype': 'Expense Claim Type', 'name': _('Others'), 'expense_type': _('Others')},
{'doctype': 'Expense Claim Type', 'name': _('Travel'), 'expense_type': _('Travel')},
# leave type
{'doctype': 'Leave Type', 'leave_type_name': _('Casual Leave'), 'name': _('Casual Leave'),
'is_encash': 1, 'is_carry_forward': 1, 'max_days_allowed': '3', 'include_holiday': 1},
{'doct | ype': 'Leave Type', 'leave_type_name': _('Compensatory Off'), 'name' | : _('Compensatory Off'),
'is_encash': 0, 'is_carry_forward': 0, 'include_holiday': 1},
{'doctype': 'Leave Type', 'leave_type_name': _('Sick Leave'), 'name': _('Sick Leave'),
'is_encash': 0, 'is_carry_forward': 0, 'include_holiday': 1},
{'doctype': 'Leave Type', 'leave_type_name': _('Privilege Leave'), 'name': _('Privilege Leave'),
'is_encash': 0, 'is_carry_forward': 0, 'include_holiday': 1},
{'doctype': 'Leave Type', 'leave_type_name': _('Leave Without Pay'), 'name': _('Leave Without Pay'),
'is_encash': 0, 'is_carry_forward': 0, 'is_lwp':1, 'include_holiday': 1},
# Employment Type
{'doctype': 'Employment Type', 'employee_type_name': _('Full-time')},
{'doctype': 'Employment Type', 'employee_type_name': _('Part-time')},
{'doctype': 'Employment Type', 'employee_type_name': _('Probation')},
{'doctype': 'Employment Type', 'employee_type_name': _('Contract')},
{'doctype': 'Employment Type', 'employee_type_name': _('Commission')},
{'doctype': 'Employment Type', 'employee_type_name': _('Piecework')},
{'doctype': 'Employment Type', 'employee_type_name': _('Intern')},
{'doctype': 'Employment Type', 'employee_type_name': _('Apprentice')},
# Department
{'doctype': 'Department', 'department_name': _('Accounts')},
{'doctype': 'Department', 'department_name': _('Marketing')},
{'doctype': 'Department', 'department_name': _('Sales')},
{'doctype': 'Department', 'department_name': _('Purchase')},
{'doctype': 'Department', 'department_name': _('Operations')},
{'doctype': 'Department', 'department_name': _('Production')},
{'doctype': 'Department', 'department_name': _('Dispatch')},
{'doctype': 'Department', 'department_name': _('Customer Service')},
{'doctype': 'Department', 'department_name': _('Human Resources')},
{'doctype': 'Department', 'department_name': _('Management')},
{'doctype': 'Department', 'department_name': _('Quality Management')},
{'doctype': 'Department', 'department_name': _('Research & Development')},
{'doctype': 'Department', 'department_name': _('Legal')},
# Designation
{'doctype': 'Designation', 'designation_name': _('CEO')},
{'doctype': 'Designation', 'designation_name': _('Manager')},
{'doctype': 'Designation', 'designation_name': _('Analyst')},
{'doctype': 'Designation', 'designation_name': _('Engineer')},
{'doctype': 'Designation', 'designation_name': _('Accountant')},
{'doctype': 'Designation', 'designation_name': _('Secretary')},
{'doctype': 'Designation', 'designation_name': _('Associate')},
{'doctype': 'Designation', 'designation_name': _('Administrative Officer')},
{'doctype': 'Designation', 'designation_name': _('Business Development Manager')},
{'doctype': 'Designation', 'designation_name': _('HR Manager')},
{'doctype': 'Designation', 'designation_name': _('Project Manager')},
{'doctype': 'Designation', 'designation_name': _('Head of Marketing and Sales')},
{'doctype': 'Designation', 'designation_name': _('Software Developer')},
{'doctype': 'Designation', 'designation_name': _('Designer')},
{'doctype': 'Designation', 'designation_name': _('Researcher')},
# territory
{'doctype': 'Territory', 'territory_name': _('All Territories'), 'is_group': 1, 'name': _('All Territories'), 'parent_territory': ''},
# customer group
{'doctype': 'Customer Group', 'customer_group_name': _('All Customer Groups'), 'is_group': 1, 'name': _('All Customer Groups'), 'parent_customer_group': ''},
{'doctype': 'Customer Group', 'customer_group_name': _('Individual'), 'is_group': 0, 'parent_customer_group': _('All Custo |
arximboldi/jpblib | test/jpb_coop.py | Python | gpl-3.0 | 15,395 | 0.006042 | # -*- coding: utf-8 -*-
#
# File: jpb_coop.py
# Author: Juan Pedro Bolívar Puente <raskolnikov@es.gnu.org>
# Date: Fri Jan 20 16:12:23 2012
# Time-stamp: <2012-01-25 20:21:45 jbo>
#
#
# Copyright (C) 2012 Juan Pedro Bolívar Puente
#
# This file is part of jpblib.
#
# jpblib is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
#  published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# jpblib is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for jpb.coop
"""
from jpb import coop
from itertools import repeat
import unittest
def make_test_hierarchy(trace, decorator=lambda x:x, metacls=type):
    """Build the diamond fixture hierarchy _A <- (_B, _C) <- _D <- _F.

    Every constructor and method appends its own function object to *trace*,
    letting the tests assert the cooperative call order.  *decorator* and
    *metacls* allow the suite to exercise both the class-decorator and the
    metaclass flavours of jpb.coop.
    """
    @decorator
    class _A(object):
        __metaclass__ = metacls
        @coop.cooperate
        def __init__(self):
            trace.append(_A.__init__)
        @coop.cooperative
        def method(self, mparam):
            self._a_mparam = mparam
            trace.append(_A.method)
        @coop.cooperative
        def post_method(self, pmparam):
            self._a_pmparam = pmparam
            trace.append(_A.post_method)
    @decorator
    class _B(_A):
        __metaclass__ = metacls
        @coop.cooperate
        def __init__(self, b_param = 'b_param'):
            self._b_param = b_param
            trace.append(_B.__init__)
        @coop.cooperate
        def method(self, mparam, b_mparam='b_mparam'):
            self._b_mparam = b_mparam
            trace.append(_B.method)
        @coop.post_cooperate
        def post_method(self, pmparam, b_pmparam='b_mparam'):
            self._b_pmparam = b_pmparam
            trace.append(_B.post_method)
    @decorator
    class _C(_A):
        __metaclass__ = metacls
        @coop.cooperate
        def __init__(self):
            trace.append(_C.__init__)
        @coop.cooperate
        def method(self, mparam):
            self._c_mparam = mparam
            trace.append(_C.method)
        @coop.post_cooperate
        def post_method(self, pmparam):
            self._c_pmparam = pmparam
            trace.append(_C.post_method)
    # _D closes the diamond over _B and _C.
    @decorator
    class _D(_B, _C):
        __metaclass__ = metacls
        @coop.cooperate
        def __init__(self, d_param = 'd_param'):
            self._d_param = d_param
            trace.append(_D.__init__)
        @coop.cooperate
        def method(self, mparam, d_mparam='d_mparam'):
            self._d_mparam = d_mparam
            trace.append(_D.method)
        @coop.post_cooperate
        def post_method(self, pmparam, d_pmparam='d_mparam'):
            self._d_pmparam = d_pmparam
            trace.append(_D.post_method)
    # _F adds a redundant _A base to stress MRO linearisation.
    @decorator
    class _F(_D, _A):
        __metaclass__ = metacls
    return _A, _B, _C, _D, _F
class TestCoop(unittest.TestCase):
    # Hooks overridden by metaclass-flavoured subclasses of this test case.
    cls_decorator = coop.cooperative_class
    cls_meta = type
    def setUp(self):
        """Build the _A.._F fixture hierarchy, recording calls in self._trace."""
        self._trace = []
        self._A, self._B, self._C, self._D, self._F = make_test_hierarchy(
            self._trace,
            decorator = self.cls_decorator.im_func,
            metacls = self.cls_meta)
    def test_init_parameter_passing(self):
        """Keyword args reach the base class that declares them via cooperative __init__."""
        obj = self._D()
        self.assertEqual(obj._b_param, 'b_param')
        self.assertEqual(obj._d_param, 'd_param')
        obj = self._D(b_param = 'new_b_param')
        self.assertEqual(obj._b_param, 'new_b_param')
        self.assertEqual(obj._d_param, 'd_param')
        obj = self._D(d_param = 'new_d_param')
        self.assertEqual(obj._b_param, 'b_param')
        self.assertEqual(obj._d_param, 'new_d_param')
        obj = self._D(d_param = 'new_d_param',
                      b_param = 'new_b_param')
        self.assertEqual(obj._b_param, 'new_b_param')
        self.assertEqual(obj._d_param, 'new_d_param')
    def test_init_check_no_positional(self):
        """A cooperative __init__ may not declare required positional parameters."""
        def make_cls():
            @self.cls_decorator.im_func
            class _Bad(object):
                __metaclass__ = self.cls_meta
                @coop.cooperate
                def __init__(self, positional):
                    pass
        self.assertRaises (coop.CooperativeError, make_cls)
    def test_init_check_no_variadic(self):
        """A cooperative __init__ may not declare *args."""
        def make_cls():
            @self.cls_decorator.im_func
            class _Bad(object):
                __metaclass__ = self.cls_meta
                @coop.cooperate
                def __init__(self, *a):
                    pass
        self.assertRaises (coop.CooperativeError, make_cls)
    def test_init_check_no_variadic_keywords(self):
        """A cooperative __init__ may not declare **kwargs."""
        def make_cls():
            @self.cls_decorator.im_func
            class _Bad(object):
                __metaclass__ = self.cls_meta
                @coop.cooperate
                def __init__(self, **k):
                    pass
        self.assertRaises (coop.CooperativeError, make_cls)
    def test_init_must_cooperate(self):
        """__init__ without a cooperation decorator is rejected."""
        def make_cls():
            @self.cls_decorator.im_func
            class _Bad(object):
                __metaclass__ = self.cls_meta
                def __init__(self):
                    pass
        self.assertRaises (coop.CooperativeError, make_cls)
    def test_init_must_override(self):
        """@cooperative (declaration-only) is not valid on __init__; use @cooperate."""
        def make_cls():
            @self.cls_decorator.im_func
            class _Bad(object):
                __metaclass__ = self.cls_meta
                @coop.cooperative
                def __init__(self):
                    pass
        self.assertRaises (coop.CooperativeError, make_cls)
    def test_super_params_sends_params(self):
        """cooperate_with_params forwards the fixed keyword values up the MRO."""
        @self.cls_decorator.im_func
        class _Fixed(self._F):
            __metaclass__ = self.cls_meta
            @coop.cooperate_with_params(b_param='fixed_b_param')
            def __init__(self):
                pass
        obj = _Fixed()
        self.assertEqual(obj._b_param, 'fixed_b_param')
    def test_manual_init(self):
        """manual_cooperate lets __init__ call super() by hand; MRO order is preserved."""
        outer_self = self
        @self.cls_decorator.im_func
        class _Manual(self._D):
            __metaclass__ = self.cls_meta
            @coop.manual_cooperate
            def __init__(self, *a, **k):
                super(_Manual, self).__init__(*a, **k)
                outer_self._trace.append(_Manual.__init__)
        self._clear_trace()
        _Manual()
        self._check_trace_calls_with_mro(_Manual.__init__)
    def test_can_mix_non_cooperative_superclass_single_inherit(self):
        """A single non-cooperative base class is allowed."""
        class NonCooperativeSuperClass(object):
            pass
        @self.cls_decorator.im_func
        class _Good(NonCooperativeSuperClass):
            __metaclass__ = self.cls_meta
        self.assertTrue(isinstance(_Good(), _Good))
    def test_can_not_mix_non_cooperative_superclass_multi_inherit(self):
        """Multiple non-cooperative base classes are rejected."""
        class NonCooperativeSuperClass1(object):
            pass
        class NonCooperativeSuperClass2(object):
            pass
        def make_class():
            @self.cls_decorator.im_func
            class _Bad(NonCooperativeSuperClass1,
                       NonCooperativeSuperClass2):
                __metaclass__ = self.cls_meta
        self.assertRaises(coop.CooperativeError, make_class)
    def test_can_mix_non_cooperative_subclass(self):
        """A plain subclass of a cooperative class still initialises cooperatively."""
        class _Good(self._D):
            pass
        self._clear_trace()
        _Good()
        self._check_trace_calls_with_mro(self._D.__init__)
    def test_abstract_method_forbids_instantiation(self):
        """Declaring an @abstract method makes the class non-instantiable."""
        @self.cls_decorator.im_func
        class _ABC(self._D):
            __metaclass__ = self.cls_meta
            @coop.abstract
            def abstract_method(self):
                return 0
        self.assertRaises(TypeError, _ABC)
def test_override_abstract_method_enables_instantiation(self):
@self.cls_decorator.im_func
class _ABC(self._D):
__metaclass__ = self.cls_meta
@coop.abstract
def |
waseem18/oh-mainline | vendor/packages/scrapy/scrapy/contrib/spiders/sitemap.py | Python | agpl-3.0 | 2,111 | 0.002369 | import re
from scrapy.spider import BaseSpider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip
from scrapy import log
class SitemapSpider(BaseSpider):
    """Spider that crawls URLs discovered from sitemap (and robots.txt) files.

    ``sitemap_urls`` seeds the crawl; ``sitemap_rules`` is a list of
    (regex, callback) pairs — the first regex matching a URL picks the
    callback; ``sitemap_follow`` restricts which sitemap-index entries are
    followed.
    Fix: removed stray " | " extraction artifacts that corrupted the
    ``__init__`` line and the ``_parse_sitemap`` signature.
    """
    sitemap_urls = ()
    sitemap_rules = [('', 'parse')]
    sitemap_follow = ['']

    def __init__(self, *a, **kw):
        super(SitemapSpider, self).__init__(*a, **kw)
        self._cbs = []
        for r, c in self.sitemap_rules:
            # Rules may name a spider method by string.
            if isinstance(c, basestring):
                c = getattr(self, c)
            self._cbs.append((regex(r), c))
        self._follow = [regex(x) for x in self.sitemap_follow]

    def start_requests(self):
        return [Request(x, callback=self._parse_sitemap) for x in self.sitemap_urls]

    def _parse_sitemap(self, response):
        if response.url.endswith('/robots.txt'):
            for url in sitemap_urls_from_robots(response.body):
                yield Request(url, callback=self._parse_sitemap)
        else:
            if isinstance(response, XmlResponse):
                body = response.body
            elif is_gzipped(response):
                body = gunzip(response.body)
            else:
                log.msg("Ignoring non-XML sitemap: %s" % response, log.WARNING)
                return
            s = Sitemap(body)
            if s.type == 'sitemapindex':
                # Follow nested sitemaps that match a sitemap_follow pattern.
                for loc in iterloc(s):
                    if any(x.search(loc) for x in self._follow):
                        yield Request(loc, callback=self._parse_sitemap)
            elif s.type == 'urlset':
                for loc in iterloc(s):
                    for r, c in self._cbs:
                        if r.search(loc):
                            yield Request(loc, callback=c)
                            break
def is_gzipped(response):
    """Return True when the response declares a gzip content type."""
    return response.headers.get('Content-Type', '') in \
        ('application/x-gzip', 'application/gzip')
def regex(x):
    """Compile *x* when it is a pattern string; pass compiled patterns through."""
    return re.compile(x) if isinstance(x, basestring) else x
def iterloc(it):
    """Yield the 'loc' URL of every sitemap entry in *it*."""
    return (entry['loc'] for entry in it)
|
PaavoJokinen/molli | ncurses.py | Python | gpl-3.0 | 460 | 0.017391 | #!/Users/pjjokine/an | aconda/bin/python3
"""Tiny curses demo: fills a 20x5 window with blinking 'a' characters."""
import curses
import os
import signal
from time import sleep

stdscr = curses.initscr()
curses.noecho()

try:
    # Window geometry: 5 rows x 40 cols, top-left corner at (y=7, x=20).
    begin_x = 20; begin_y = 7
    height = 5; width = 40
    win = curses.newwin(height, width, begin_y, begin_x)

    # Fix: the range() call was corrupted by a stray " | " extraction artifact.
    for x in range(0, 20):
        for y in range(0, 5):
            win.addstr(y, x, "a", curses.A_BLINK)
    win.refresh()
    sleep(3)
    #signal.pause()
finally:
    # Always restore the terminal, even if drawing raises.
    curses.nocbreak()
    stdscr.keypad(False)
    curses.echo()
    curses.endwin()
|
h2oai/h2o-3 | h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7611_early_stop_gam_binomial.py | Python | apache-2.0 | 4,548 | 0.010994 | from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
# In this test, we check that we can do early-stopping with GAM. In particular, we check the following conditions
# 1. run the model without early stopping and check that model with early stopping runs for fewer iterations
# 2. for models with early stopping, check that early stopping is correctly done.
# 3. when lambda_search is enabled, early stopping should be disabled
def test_gam_model_predict():
    """Verify GAM early stopping for the binomial family.

    Trains a reference model without early stopping, then for each metric
    (logloss, AUC) trains an early-stopped model and validates the stopping
    decision against stopping_rounds/stopping_tolerance.  Finally asserts
    that enabling early stopping together with lambda_search raises.
    """
    print("Checking early-stopping for binomial")
    print("Preparing for data....")
    h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
    # C1..C10 and the response C21 are categorical in this dataset.
    h2o_data["C1"] = h2o_data["C1"].asfactor()
    h2o_data["C2"] = h2o_data["C2"].asfactor()
    h2o_data["C3"] = h2o_data["C3"].asfactor()
    h2o_data["C4"] = h2o_data["C4"].asfactor()
    h2o_data["C5"] = h2o_data["C5"].asfactor()
    h2o_data["C6"] = h2o_data["C6"].asfactor()
    h2o_data["C7"] = h2o_data["C7"].asfactor()
    h2o_data["C8"] = h2o_data["C8"].asfactor()
    h2o_data["C9"] = h2o_data["C9"].asfactor()
    h2o_data["C10"] = h2o_data["C10"].asfactor()
    myY = "C21"
    h2o_data["C21"] = h2o_data["C21"].asfactor()
    splits = h2o_data.split_frame(ratios=[0.8], seed=12345)
    train = splits[0]
    test = splits[1]
    early_stop_metrics = ["logloss", "AUC"]
    early_stop_valid_metrics = ["validation_logloss", "validation_auc"]
    max_stopping_rounds = 3  # maximum stopping rounds allowed to be used for early stopping metric
    max_tolerance = 0.1  # maximum tolerance to be used for early stopping metric
    bigger_is_better = [False, True]  # logloss decreases, AUC increases
    print("Building a GAM model without early stop")
    h2o_model_no_early_stop = H2OGeneralizedAdditiveEstimator(family='binomial', gam_columns=["C11"], scale = [0.0001],
                                                              score_each_iteration=True)
    h2o_model_no_early_stop.train(x=list(range(0,20)), y=myY, training_frame=train, validation_frame=test)
    for ind in range(len(early_stop_metrics)):
        print("Building early-stop model")
        h2o_model = H2OGeneralizedAdditiveEstimator(family='binomial', gam_columns=["C11"], scale = [0.0001],
                                                    stopping_rounds=max_stopping_rounds,score_each_iteration=True,
                                                    stopping_metric=early_stop_metrics[ind],
                                                    stopping_tolerance=max_tolerance)
        h2o_model.train(x=list(range(0,20)), y="C21", training_frame=train, validation_frame=test)
        # Pull the per-iteration validation metric out of the scoring history.
        metric_list1 = \
            pyunit_utils.extract_field_from_twoDimTable(
                h2o_model._model_json["output"]["glm_scoring_history"].col_header,
                h2o_model._model_json["output"]["glm_scoring_history"].cell_values,
                early_stop_valid_metrics[ind])
        print("Checking if early stopping has been done correctly for {0}.".format(early_stop_metrics[ind]))
        assert pyunit_utils.evaluate_early_stopping(metric_list1, max_stopping_rounds, max_tolerance,
                                                    bigger_is_better[ind]), \
            "Early-stopping was not incorrect."
    print("Check if lambda_search=True, early-stop enabled, an error should be thrown.")
    try:
        h2o_model = H2OGeneralizedAdditiveEstimator(family='binomial', gam_columns=["C11"], scale = [0.0001],
                                                    stopping_rounds=max_stopping_rounds,score_each_iteration=True,
                                                    stopping_metric=early_stop_metrics[ind],
                                                    stopping_tolerance=max_tolerance, lambda_search=True, nlambdas=3)
        h2o_model.train(x=list(range(0,20)), y=myY, training_frame=train, validation_frame=test)
        assert False, "Exception should have been risen when lambda_search=True and early stop is enabled"
    except Exception as ex:
        print(ex)
        temp = str(ex)
        assert ("early stop: cannot run when lambda_search=True. Lambda_search has its own early-stopping "
                "mechanism" in temp), "Wrong exception was received."
    print("early-stop test passed!")
# Standard h2o pyunit entry point: run via the standalone harness when executed
# as a script, or call the test directly when imported by the suite runner.
if __name__ == "__main__":
    pyunit_utils.standalone_test(test_gam_model_predict)
else:
    test_gam_model_predict()
|
UCL-INGI/INGInious | inginious/frontend/pages/course_admin/statistics.py | Python | agpl-3.0 | 13,926 | 0.005674 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Utilities for computation of statistics """
from collections import OrderedDict
import flask
from inginious.frontend.pages.course_admin.utils import make_csv, INGIniousSubmissionsAdminPage
from datetime import datetime, date, timedelta
class CourseStatisticsPage(INGIniousSubmissionsAdminPage):
    def _tasks_stats(self, tasks, filter, limit):
        """Per-task submission counts (total and successful), most-submitted first.

        ``filter`` is a MongoDB match document; ``limit`` caps the number of
        submissions scanned.  Task ids missing from ``tasks`` keep the raw id
        as their display name.
        """
        stats_tasks = self.database.submissions.aggregate(
            [{"$match": filter},
             {"$limit": limit},
             {"$project": {"taskid": "$taskid", "result": "$result"}},
             {"$group": {"_id": "$taskid", "submissions": {"$sum": 1}, "validSubmissions":
                 {"$sum": {"$cond": {"if": {"$eq": ["$result", "success"]}, "then": 1, "else": 0}}}}
              },
             {"$sort": {"submissions": -1}}])
        return [
            {"name": tasks[x["_id"]].get_name(self.user_manager.session_language()) if x["_id"] in tasks else x["_id"],
             "submissions": x["submissions"],
             "validSubmissions": x["validSubmissions"]}
            for x in stats_tasks
        ]
    def _users_stats(self, filter, limit):
        """Per-user submission counts (total and successful).

        ``filter`` is a MongoDB match document; ``limit`` caps the number of
        submissions scanned.
        """
        stats_users = self.database.submissions.aggregate([
            {"$match": filter},
            {"$limit": limit},
            {"$project": {"username": "$username", "result": "$result"}},
            {"$unwind": "$username"},
            {"$group": {"_id": "$username", "submissions": {"$sum": 1}, "validSubmissions":
                {"$sum": {"$cond": {"if": {"$eq": ["$result", "success"]}, "then": 1, "else": 0}}}}
             },
            # NOTE(review): a second $limit after $group also caps the number of
            # distinct users returned (unlike _tasks_stats) — confirm intended.
            {"$limit": limit},
            {"$sort": {"submissions": -1}}])
        return [
            {"name": x["_id"],
             "submissions": x["submissions"],
             "validSubmissions": x["validSubmissions"]}
            for x in stats_users
        ]
def _graph_stats(self, daterange, filter, limit):
| project = {
"year": {"$year": "$submitted_on"},
"month": {"$month": "$submitted_on"},
"day": {"$dayOfMonth": "$submitted_on"},
"result": "$result"
}
groupby = {"year": "$year", "month | ": "$month", "day": "$day"}
method = "day"
if (daterange[1] - daterange[0]).days < 7:
project["hour"] = {"$hour": "$submitted_on"}
groupby["hour"] = "$hour"
method = "hour"
min_date = daterange[0].replace(minute=0, second=0, microsecond=0)
max_date = daterange[1].replace(minute=0, second=0, microsecond=0)
delta1 = timedelta(hours=1)
if method == "day":
min_date = min_date.replace(hour=0)
max_date = max_date.replace(hour=0)
delta1 = timedelta(days=1)
filter["submitted_on"] = {"$gte": min_date, "$lt": max_date+delta1}
stats_graph = self.database.submissions.aggregate(
[{"$match": filter},
{"$limit": limit},
{"$project": project},
{"$group": {"_id": groupby, "submissions": {"$sum": 1}, "validSubmissions":
{"$sum": {"$cond": {"if": {"$eq": ["$result", "success"]}, "then": 1, "else": 0}}}}
},
{"$sort": {"_id": 1}}])
increment = timedelta(days=(1 if method == "day" else 0), hours=(0 if method == "day" else 1))
all_submissions = {}
valid_submissions = {}
cur = min_date
while cur <= max_date:
all_submissions[cur] = 0
valid_submissions[cur] = 0
cur += increment
for entry in stats_graph:
c = datetime(entry["_id"]["year"], entry["_id"]["month"], entry["_id"]["day"], 0 if method == "day" else entry["_id"]["hour"])
all_submissions[c] += entry["submissions"]
valid_submissions[c] += entry["validSubmissions"]
all_submissions = sorted(all_submissions.items())
valid_submissions = sorted(valid_submissions.items())
return all_submissions, valid_submissions
def submission_url_generator(self, taskid):
""" Generates a submission url """
return "?tasks=" + taskid
def _progress_stats(self, course):
data = list(self.database.user_tasks.aggregate(
[
{
"$match":
{
"courseid": course.get_id(),
"username": {"$in": self.user_manager.get_course_registered_users(course, False)}
}
},
{
"$group":
{
"_id": "$taskid",
"viewed": {"$sum": 1},
"attempted": {"$sum": {"$cond": [{"$ne": ["$tried", 0]}, 1, 0]}},
"attempts": {"$sum": "$tried"},
"succeeded": {"$sum": {"$cond": ["$succeeded", 1, 0]}}
}
}
]))
tasks = course.get_task_dispenser().get_ordered_tasks()
# Now load additional information
result = OrderedDict()
for taskid in tasks:
result[taskid] = {"name": tasks[taskid].get_name(self.user_manager.session_language()), "viewed": 0,
"attempted": 0, "attempts": 0, "succeeded": 0, "url": self.submission_url_generator(taskid)}
for entry in data:
if entry["_id"] in result:
result[entry["_id"]]["viewed"] = entry["viewed"]
result[entry["_id"]]["attempted"] = entry["attempted"]
result[entry["_id"]]["attempts"] = entry["attempts"]
result[entry["_id"]]["succeeded"] = entry["succeeded"]
return result
def _global_stats(self, tasks, filter, limit, best_submissions_list, pond_stat):
submissions = self.database.submissions.find(filter)
if limit is not None:
submissions.limit(limit)
data = list(submissions)
for d in data:
d["best"] = d["_id"] in best_submissions_list # mark best submissions
return compute_statistics(tasks, data, pond_stat)
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid)
user_input = flask.request.args.copy()
user_input["users"] = flask.request.args.getlist("users")
user_input["audiences"] = flask.request.args.getlist("audiences")
user_input["tasks"] = flask.request.args.getlist("tasks")
user_input["org_tags"] = flask.request.args.getlist("org_tags")
params = self.get_input_params(user_input, course, 500)
return self.page(course, params)
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid)
user_input = flask.request.form.copy()
user_input["users"] = flask.request.form.getlist("users")
user_input["audiences"] = flask.request.form.getlist("audiences")
user_input["tasks"] = flask.request.form.getlist("tasks")
user_input["org_tags"] = flask.request.form.getlist("org_tags")
params = self.get_input_params(user_input, course, 500)
return self.page(course, params)
def page(self, course, params):
msgs = []
daterange = [None, None]
try:
if params.get('date_before', ''):
daterange[1] = datetime.strptime(params["date_before"], "%Y-%m-%d %H:%M:%S")
if params.get('date_after', ''):
daterange[0] = datetime.strptime(params["date_after"], "%Y-%m-%d %H:%M:%S")
except ValueError: # If match of datetime.strptime() fails
msgs.append(_("Invalid dates"))
if daterange[0] is None or daterange[1] is None:
now = datetime.now().replace(minute=0, second=0, microsecond=0)
daterange = [now - timedel |
oisinmulvihill/stats-service | stats_service/service/restfulhelpers.py | Python | mit | 7,229 | 0.000692 | # -*- coding: utf-8 -*-
"""
Useful classes and methods to aid RESTful webservice development in Pyramid.
PythonPro Limited
2012-01-14
"""
import json
import httplib
import logging
import traceback
#from decorator import decorator
from pyramid.request import Response
def get_log(e=None):
    """Return a logger namespaced under this module, optionally suffixed
    with *e* (e.g. get_log('handler') -> '<module>.handler')."""
    name = __name__
    if e:
        name = "{0}.{1}".format(name, e)
    return logging.getLogger(name)
def json_result(view_callable):
    """Return a result dict for a response.

    rc = {
        "success": True | False,
        "data": ...,
        "message", "ok" | "..message explaining result=False..",
    }

    the data field will contain whatever is returned from the response
    normal i.e. any valid type.  Exceptions are handled elsewhere.
    """
    def inner(request, *args):
        # Wrap the view's raw return value in the success envelope.
        return {
            "success": True,
            "data": view_callable(request, *args),
            "message": "ok",
        }

    return inner
def status_body(
    success=True, data=None, message="", to_json=False, traceback='',
):
    """Create a JSON response body we will use for error and other situations.

    :param success: Default True, False on failure.
    :param data: Default None, or the given result payload.
    :param message: Default "" (empty), or a user given message string.
    :param to_json: Default False which returns a dict; True returns a JSON
        string.  (The docstring previously claimed "ok" / True defaults,
        which contradicted the signature.)
    :param traceback: Optional traceback text; only included when non-empty.

    to_json=False is used in situations where something else will take care
    of the to-JSON conversion.

    :returns: status response body.  The default response is::

        json.dumps(dict(
            success=True | False,
            data=...,
            message="...",
        ))
    """
    # TODO: switch tracebacks off for production
    body = dict(
        success=success,
        data=data,
        message=message,
    )
    if traceback:
        body['traceback'] = traceback
    if to_json:
        body = json.dumps(body)
    return body
def status_err(exc, tb):
    """ Generate an error status response from an exception and traceback

    :param exc: the exception instance that was caught.
    :param tb: the formatted traceback string.
    :returns: a status_body dict (not JSON encoded) with success=False.
    """
    # BUG FIX: the old positional call status_body("error", str(exc),
    # exc.__class__.__name__, tb, to_json=False) fed *tb* into the to_json
    # parameter slot and then repeated to_json as a keyword, raising
    # "TypeError: got multiple values for argument 'to_json'" on every call.
    # Pass everything by keyword so each value lands where intended.
    return status_body(
        success=False,
        data=str(exc),
        message=exc.__class__.__name__,
        traceback=tb,
        to_json=False,
    )
#@decorator
def status_wrapper(f, *args, **kw):
    """ Decorate a view function to wrap up its response in the status_body
    gumph from above, and handle all exceptions.

    On success the view's return value is placed in the "message" field; on
    failure the exception and traceback are logged and returned via
    status_err.
    """
    try:
        res = f(*args, **kw)
        return status_body(message=res, to_json=False)
    # "except Exception, e" is Python-2-only syntax; "as" works on 2.6+ and 3.
    except Exception as e:
        tb = traceback.format_exc()
        get_log().exception(tb)
        return status_err(e, tb)
def notfound_404_view(request):
    """A custom 404 view returning JSON error message body instead of HTML.

    :param request: the pyramid request; request.exception.message carries
        the URI that was not found.
    :returns: a JSON response with the body::

        json.dumps(dict(error="URI Not Found '...'"))
    """
    msg = str(request.exception.message)
    get_log().info("notfound_404_view: URI '%s' not found!" % str(msg))
    request.response.status = httplib.NOT_FOUND
    request.response.content_type = "application/json"
    body = status_body(
        success=False,
        message="URI Not Found '%s'" % msg,
        # BUG FIX: Response() needs a string body; without to_json=True
        # status_body returns a dict.  xyz_handler in this module already
        # encodes to JSON for exactly this reason.
        to_json=True,
    )
    return Response(body)
def xyz_handler(status):
    """A custom xyz view returning JSON error message body instead of HTML.

    :param status: the HTTP status line/code to attach to the response.
    :returns: a view callable producing a JSON response with the body::

        json.dumps(dict(error="URI Not Found '...'"))
    """
    log = get_log()

    def handler(request):
        msg = str(request.exception.message)
        log.info("xyz_handler (%s): %s" % (status, str(msg)))
        # Encode the status payload to JSON here; the Response body must be
        # a string.
        body = status_body(
            success=False,
            message=msg,
            to_json=True,
        )
        response = Response(body)
        response.status = status
        response.content_type = "application/json"
        return response

    return handler
# Reference:
# * http://zhuoqiang.me/a/restful-pyramid
#
# Reference:
# * http://zhuoqiang.me/a/restful-pyramid
#
class HttpMethodOverrideMiddleware(object):
    '''WSGI middleware for overriding HTTP Request Method for RESTful support

    A POST carrying an X-HTTP-Method-Override header naming PUT, DELETE,
    OPTIONS or PATCH is rewritten to that method before reaching the app;
    the original method is preserved in
    environ['http_method_override.original_method'].
    '''
    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        if environ['REQUEST_METHOD'] == 'POST':
            # Look for the "X-HTTP-Method-Override" header (case folded to
            # upper so "put" and "PUT" behave the same).
            override = environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper()
            if override in ('PUT', 'DELETE', 'OPTIONS', 'PATCH'):
                # Save the original HTTP method, then override it.
                environ['http_method_override.original_method'] = environ['REQUEST_METHOD']
                environ['REQUEST_METHOD'] = override
        return self.application(environ, start_response)
class JSONErrorHandler(object):
    """Capture exceptions usefully and return to aid the client side.

    :returns: status_body set for an error.

    E.g.::

        rc = {
            "success": True | False,
            "data": ...,
            "message", "ok" | "..message explaining result=False..",
        }

    the data field will contain whatever is returned from the response
    normal i.e. any valid type.
    """
    def __init__(self, application):
        self.app = application
        self.log = get_log("JSONErrorHandler")

    def formatError(self):
        """Return a string representing the last traceback."""
        exception, instance, tb = traceback.sys.exc_info()
        error = "".join(traceback.format_tb(tb))
        return error

    def __call__(self, environ, start_response):
        try:
            return self.app(environ, start_response)
        # "except Exception, e" is Python-2-only syntax; "as" works on
        # 2.6+ and 3.
        except Exception as e:
            self.log.exception("error: ")
            ctype = environ.get('CONTENT_TYPE')
            if ctype == "application/json":
                self.log.debug("Request was in JSON responding with JSON.")
                errmsg = "%d %s" % (
                    httplib.INTERNAL_SERVER_ERROR,
                    httplib.responses[httplib.INTERNAL_SERVER_ERROR]
                )
                start_response(errmsg, [('Content-Type', 'application/json')])
                message = str(e)
                error = "%s" % (type(e).__name__)
                self.log.error("%s: %s" % (error, message))
                return status_body(
                    success=False,
                    # Should this be disabled on production?
                    data=self.formatError(),
                    message=message,
                    # I need to JSON encode it as the view never finished and
                    # the requestor is expecting a JSON response status.
                    to_json=True,
                )
            else:
                raise
|
uhuramedia/django-portlet | portlet/tests/runtests.py | Python | bsd-3-clause | 2,032 | 0.001476 | #!/usr/bin/env python
"""
This script is a trick to setup a fake Django environment, since this reusable
app will be developed and tested outside any specifiv Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
Taken from https://github.com/mbrochh/tdd-with-django-reusable-app
"""
import os
import sys
from django.conf import settings
# Django apps required by the portlet app under test.
# FIX: a stray "|" artifact before 'django.contrib.sessions' broke the list
# literal; it has been removed.
EXTERNAL_APPS = [
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'django.contrib.sites',
]

# The app under test plus the nose test runner integration.
INTERNAL_APPS = [
    'portlet',
    'django_nose',
]

INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS

# Module name patterns excluded from the coverage report; external apps are
# appended below since we only measure our own code.
COVERAGE_MODULE_EXCLUDES = [
    'tests$', 'settings$', 'urls$', 'locale$',
    'migrations', 'fixtures', 'admin$', 'django_extensions',
]
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
# Configure a minimal, self-contained Django settings environment so the
# reusable app's tests can run outside a real project.  Skipped when a
# settings module is already active.
if not settings.configured:
    settings.configure(
        # In-memory SQLite keeps the test run fast and leaves no files behind.
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
                "NAME": ":memory:",
            }
        },
        INSTALLED_APPS=INSTALLED_APPS,
        ROOT_URLCONF='portlet.urls',
        # Templates live alongside this test runner in the package.
        TEMPLATE_DIRS=(
            os.path.join(os.path.dirname(__file__), '../templates'),
        ),
        COVERAGE_MODULE_EXCLUDES=COVERAGE_MODULE_EXCLUDES,
        # HTML coverage output goes into a "coverage" dir next to this file.
        COVERAGE_REPORT_HTML_OUTPUT_DIR=os.path.join(
            os.path.dirname(__file__), 'coverage')
    )
from django_coverage.coverage_runner import CoverageRunner
from django_nose import NoseTestSuiteRunner
class NoseCoverageTestRunner(CoverageRunner, NoseTestSuiteRunner):
    """Custom test runner that uses nose and coverage.

    Combines django_coverage's CoverageRunner (listed first so its hooks win
    in the MRO) with django_nose's nose-based test discovery.
    """
    pass
def runtests(*test_args):
    """Run the given test labels with the nose runner and exit the process
    with the number of failures as the status code."""
    runner = NoseTestSuiteRunner(verbosity=2, interactive=True)
    sys.exit(runner.run_tests(test_args))
# Script entry point: forward any command-line arguments as test labels.
if __name__ == '__main__':
    runtests(*sys.argv[1:])
|
cisco/xr-telemetry-m2m-web | src/m2m_demo/frontend/web.py | Python | apache-2.0 | 2,569 | 0.002336 | # =============================================================================
# m2m_demo_plugin.py
#
# Set up the web service.
#
# December 2015
#
# Copyright (c) 2015 by cisco Systems, Inc.
# All rights reserved.
# =============================================================================
import os, re
from twisted.web.resource import Resource, NoResource
from twisted.web.static import File
from twisted.web.server import Site
from m2m_demo.frontend import discover_resources, session
class Root(Resource):
    """
    This manages the overall routing of requests for the site

    Requests are served first from static assets, then from dynamically
    discovered plugin resources; anything else is a 404 (NoResource).
    """
    def __init__(self, cfg, static):
        Resource.__init__(self)
        self.cfg = cfg
        self.static = static
        # path_map: URL path -> plugin resource class;
        # tab_map: discovered tab metadata passed through to plugins.
        self.path_map, self.tab_map = discover_resources()
    def getChild(self, name, request):
        """
        Dispatch a specific incoming request to an appropriate resource
        """
        # First try: static resource
        static = self.static.handle_static(request)
        if static:
            return static
        # If that doesn't serve the request, try the plugin dynamic path
        if request.path in self.path_map:
            print 'using plugin %s for %s' % (self.path_map[request.path], request.path)
            # Give the plugin a private copy of the config plus the tab map.
            cfg = self.cfg.copy()
            cfg['tab_map'] = self.tab_map
            # Fold query args into the config; existing keys win, and only
            # the first value of each repeated arg is kept.
            for arg in request.args:
                if arg not in cfg:
                    cfg[arg] = request.args[arg][0]
            # Augment the request with our own session data
            request.sdata = session.get_data(request)
            # Instantiate the plugin resource with the assembled config.
            return self.path_map[request.path](cfg)
        # Nothing else to try
        print 'Failed to match path', request.path, 'to any plugins', self.path_map
        return NoResource()
class Static(object):
    """
    Serve up static assets

    Walks <assets>/static once at construction time and maps each file
    basename (files not starting with '!') to a twisted File resource.
    FIX: stray "|" dataset artifacts before "self.files = {}" and
    "def handle_static" broke the class body; they have been removed.
    """
    def __init__(self, cfg):
        static_dir = os.path.join(cfg['assets'], 'static')
        self.files = {}
        for root, dirs, files in os.walk(static_dir):
            for filename in files:
                # Filenames starting with '!' are deliberately not served.
                if not filename.startswith('!'):
                    fullpath = os.path.join(root, filename)
                    self.files[filename] = File(fullpath)

    def handle_static(self, request):
        """Return the File resource for the request path's final component,
        or None when it is not a known static asset."""
        r = re.search(r'.*/(.+)', request.path)
        if r and r.group(1) in self.files:
            return self.files[r.group(1)]
        return None
def start(web_cfg):
    """
    Start the web service

    Wires the static asset handler and the routing Root into a twisted Site
    and returns it.
    """
    assets = Static(web_cfg)
    return Site(Root(web_cfg, assets))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.