repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6-947k | score float64 0-0.34 | prefix stringlengths 0-8.16k | middle stringlengths 3-512 | suffix stringlengths 0-8.17k |
|---|---|---|---|---|---|---|---|---|
GuidoSchmidt/juli
|
src/models/list.py
|
Python
|
gpl-2.0
| 928
| 0
|
#!/usr/bin/env python3
from app.app import db
class List(db.Model):
id = db.Column(db.Integer, primary_key=True)
locked = db.Column(db.Boolean)
weightclass_id = db.Column(db.Integer,
db.ForeignKey("weightclass.id"))
weightclass = db.relationship("Weightclass",
backref=db.backref("weightclass",
|
lazy="dynamic"))
def __init__(self, weightclass):
self.weightclass = weightclass
self.weightclass_id = weightclass.id
self.locked = False
def __repr__(self):
return "<List {} [locked: {}]>"\
.format(self.weightclass, self.locked)
def to_json(self):
return {
"id": self.id,
"weightclass_id": self.weightclass.id,
"weightclas
|
s": self.weightclass.name,
"locked": self.locked
}
|
girving/tensorflow
|
tensorflow/python/ops/ctc_ops.py
|
Python
|
apache-2.0
| 13,730
| 0.002185
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CTC (Connectionist Temporal Classification) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_ctc_ops
from tensorflow.python.ops.nn_grad import _BroadcastMul
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access, invalid-name
@tf_export("nn.ctc_loss")
def ctc_loss(labels, inputs, sequence_length,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False, time_major=True):
"""Computes the CTC (Connectionist Temporal Classification) Loss.
This op implements the CTC loss as presented in the article:
[A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber.
Connectionist Temporal Classification: Labeling Unsegmented Sequence Data
with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA,
pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf)
Input requirements:
```
sequence_length(b) <= time for all b
max(labels.indices(labels.indices[:, 1] == b, 2))
<= sequence_length(b) for all b.
```
Notes:
This class performs the softmax operation for you, so inputs should
be e.g. linear projections of outputs by an LSTM.
The `inputs` Tensor's innermost dimension size, `num_classes`, represents
`num_labels + 1` classes, where num_labels is the number of true labels, and
the largest value `(num_classes - 1)` is reserved for the blank label.
For example, for a vocabulary containing 3 labels `[a, b, c]`,
`num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`.
Regarding the arguments `preprocess_collapse_repeated` and
`ctc_merge_repeated`:
If `preprocess_collapse_repeated` is True, then a preprocessing step runs
before loss calculation, wherein repeated labels passed to the loss
are merged into single labels. This is useful if the training labels come
from, e.g., forced alignments and therefore have unnecessary repetitions.
If `ctc_merge_repeated` is set False, then deep within the CTC calculation,
repeated non-blank labels will not be merged and are interpreted
as individual labels. This is a simplified (non-standard) version of CTC.
Here is a table of the (roughly) expected first order behavior:
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True`
Classical CTC behavior: Outputs true repeated classes with blanks in
between, and can also output repeated classes with no blanks in
between that need to be col
|
lapsed by the decoder.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False`
Never learns to output repeated classes, as they are collapsed
in the input labels before training.
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False`
Outputs repeated classes with blanks in between, but generally does not
require the decoder to collapse/merge rep
|
eated classes.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True`
Untested. Very likely will not learn to output repeated classes.
The `ignore_longer_outputs_than_inputs` option allows specifying the behavior
of the CTCLoss when dealing with sequences that have longer outputs than
inputs. If true, the CTCLoss will simply return zero gradient for those
items, otherwise an InvalidArgument error is returned, stopping training.
Args:
labels: An `int32` `SparseTensor`.
`labels.indices[i, :] == [b, t]` means `labels.values[i]` stores
the id for (batch b, time t).
`labels.values[i]` must take on values in `[0, num_labels)`.
See `core/ops/ctc_ops.cc` for more details.
inputs: 3-D `float` `Tensor`.
If time_major == False, this will be a `Tensor` shaped:
`[batch_size, max_time, num_classes]`.
If time_major == True (default), this will be a `Tensor` shaped:
`[max_time, batch_size, num_classes]`.
The logits.
sequence_length: 1-D `int32` vector, size `[batch_size]`.
The sequence lengths.
preprocess_collapse_repeated: Boolean. Default: False.
If True, repeated labels are collapsed prior to the CTC calculation.
ctc_merge_repeated: Boolean. Default: True.
ignore_longer_outputs_than_inputs: Boolean. Default: False.
If True, sequences with longer outputs than inputs will be ignored.
time_major: The shape format of the `inputs` Tensors.
If True, these `Tensors` must be shaped `[max_time, batch_size,
num_classes]`.
If False, these `Tensors` must be shaped `[batch_size, max_time,
num_classes]`.
Using `time_major = True` (default) is a bit more efficient because it
avoids transposes at the beginning of the ctc_loss calculation. However,
most TensorFlow data is batch-major, so this function also accepts
inputs in batch-major form.
Returns:
A 1-D `float` `Tensor`, size `[batch]`, containing the negative log
probabilities.
Raises:
TypeError: if labels is not a `SparseTensor`.
"""
# The second, third, etc output tensors contain the gradients. We use it in
# _CTCLossGrad() below.
if not isinstance(labels, sparse_tensor.SparseTensor):
raise TypeError("Expected labels (first argument) to be a SparseTensor")
# For internal calculations, we transpose to [time, batch, num_classes]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,N) => (T,B,N)
loss, _ = gen_ctc_ops.ctc_loss(
inputs,
labels.indices,
labels.values,
sequence_length,
preprocess_collapse_repeated=preprocess_collapse_repeated,
ctc_merge_repeated=ctc_merge_repeated,
ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
return loss
# pylint: disable=unused-argument
@ops.RegisterGradient("CTCLoss")
def _CTCLossGrad(op, grad_loss, _):
"""The derivative provided by CTC Loss.
Args:
op: the CTCLoss op.
grad_loss: The backprop for cost.
Returns:
The CTC Loss gradient.
"""
# Outputs are: loss, grad
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1], message="Currently there is no way to take the second "
" derivative of ctc_loss due to the fused implementation's interaction "
" with tf.gradients()")
# Return gradient for inputs and None for
# labels_indices, labels_values and sequence_length
return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
@tf_export("nn.ctc_greedy_decoder")
def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True):
"""Performs greedy decoding on the logits given in input (best path).
Note: Regardless of the value of merge_repeated, if the maximum index of a
given time and batch corresponds to the blank index `(num_classes - 1)`, no
new element is emitted.
If `merge_repeated` is `True`, merge repeated classes in output.
This means that if consecutive logits' maximum indices are the same,
only the first of these is emitted.
|
fajoy/nova
|
nova/openstack/common/rpc/impl_zmq.py
|
Python
|
apache-2.0
| 22,999
| 0.000217
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pprint
import socket
import string
import sys
import types
import uuid
import eventlet
from eventlet.green import zmq
import greenlet
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
# for convenience, are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('nova.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
# These globals are defined in register_opts(conf),
# a mandatory initialization call
CONF = None
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""
Serialization wrapper
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return str(jsonutils.dumps(data, ensure_ascii=True))
except TypeError:
LOG.error(_("JSON serialization failed."))
raise
def _deserialize(data):
"""
Deserialization wrapper
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = ZMQ_CTX.socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
# Linger -1 prevents lost/dropped messages
try:
self.sock.close(linger=-1)
except Exception:
pass
self.sock = None
def recv(self):
if not
|
self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send
|
(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
if serialize:
data = rpc_common.serialize_msg(data, force_envelope)
self.outq.send([str(msg_id), str(topic), str('cast'),
_serialize(data)])
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since the
|
winiciuscota/OG-Bot
|
ogbot/scraping/movement.py
|
Python
|
mit
| 3,821
| 0.003664
|
from bs4 import BeautifulSoup
from datetime import datetime
from scraper import *
from general import General
def get_arrival_time(arrival_time_str):
time = datetime.strptime(arrival_time_str.strip(), '%H:%M:%S').time()
now = datetime.now()
arrival_time = datetime.combine(now, time)
return arrival_time
class Movement(Scraper):
def __init__(self, browser, config):
super(Movement, self).__init__(browser, config)
self.general_client = General(browser, config)
def get_fleet_movement_from_movement_page(self):
"""
Deprecated, use get_fleet_movement instead
:return:
"""
url = self.url_provider.get_page_url('movement')
res = self.open_url(url)
soup = BeautifulSoup(res.read(), "lxml")
movement_nodes = soup.findAll("div", {"class": "fleetDetails detailsOpened"})
fleet_movements = []
for movement_node in movement_nodes:
origin_planet_coords = self.parse_coords(movement_node.find("span", {"class": "originCoords"}).text)
origin_planet_name = movement_node.find("span", {"class": "originPlanet"}).text.strip()
destination_coords = self.parse_coords(
movement_node.find("span", {"class": "destinationCoords tooltip"}).text)
movement = FleetMovement(origin_planet_coords, origin_planet_name, destination_coords)
fleet_movements.append(movement)
return fleet_movements
def get_fleet_movement(self):
url = self.url_provider.get_page_url('eventList')
res = self.open_url(url)
soup = BeautifulSoup(res.read(), "lxml")
movement_table = soup.find("table", {"id": "eventContent"})
movement_rows = movement_table.findAll("tr", {"class": "eventFleet"})
fleet_movements = []
for movement_row in movement_rows:
origin_coords = self.parse_coords(movement_row.find("td", {"class": "coordsOrigin"}).text.strip())
origin_planet_name = movement_row.find("td", {"class": "originFleet"}).text.strip()
dest_coords = self.parse_coords(movement_row.find("td", {"class": "destCoords"}).text.strip())
dest_planet_name = movement_row.find("td", {"class": "destFleet"}).text.strip()
count_down_td = movement_row.find("td", {"c
|
lass": "countDown"})
is_friendly = 'friendly' in count_down_td.attrs['class']
arrival_time_str = movement_row.find("td", {"class": "arrivalTime"}).text
arrival_time = get_arrival_time(arrival_time_str)
countdown_time = self.get_countdown_time(arrival_time)
|
movement = FleetMovement(origin_coords, origin_planet_name, dest_coords, dest_planet_name, is_friendly,
arrival_time, countdown_time)
fleet_movements.append(movement)
return fleet_movements
def get_countdown_time(self, arrival_time):
game_time = self.general_client.get_game_datetime()
return arrival_time - game_time
@staticmethod
def parse_coords(text):
return text.replace('[', '').replace(']', '')
def get_fleet_slots_usage(self):
"""
Get fleet slot usage data. Only works if there is at least 1 fleet in movement
"""
url = self.url_provider.get_page_url('movement')
res = self.open_url(url)
soup = BeautifulSoup(res.read())
slots_info_node = soup.find("span", {"class", "fleetSlots"})
if slots_info_node is not None:
current_slots = int(slots_info_node.find("span", {"class", "current"}).text)
all_slots = int(slots_info_node.find("span", {"class", "all"}).text)
else:
current_slots = 0
all_slots = 1
return current_slots, all_slots
|
realizeapp/realize-core
|
core/commands/frontend.py
|
Python
|
agpl-3.0
| 1,248
| 0.004006
|
from flask.ext.script import Command, Manager, Option
from flask import current_app
import os
from subprocess import Popen
class InvalidPathException(Exception):
pass
class SyncJS(Command):
option_list = (
Option('--path', '-p', dest='path'),
)
def run_command(self, command):
cmd = Popen(command, shell=True, cwd=self.cwd)
cmd.wait()
def run(self, path=None):
if path is None:
raise InvalidPathException
path = os.path.expanduser(path)
self.cwd = os.path.abspath(path)
frontend_path = os.path.abspa
|
th(os.path.join(current_app.config['REPO_PATH'], current_app.con
|
fig['FRONTEND_PATH']))
for the_file in os.listdir(current_app.config['FRONTEND_PATH']):
file_path = os.path.join(current_app.config['FRONTEND_PATH'], the_file)
try:
if os.path.isfile(file_path) and the_file != ".vc":
os.unlink(file_path)
except Exception, e:
print e
self.run_command("npm install")
self.run_command("grunt default")
self.run_command("rm -rf {0}/*".format(frontend_path))
self.run_command("cp -a dist/* {0}".format(os.path.abspath(frontend_path)))
|
naveensan1/nuage-openstack-neutron
|
nuage_neutron/plugins/common/service_plugins/l3.py
|
Python
|
apache-2.0
| 57,131
| 0.000035
|
# Copyright 2016 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
|
Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES
|
OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from logging import handlers
import netaddr
from nuage_neutron.plugins.common import constants
from nuage_neutron.plugins.common import exceptions as nuage_exc
from nuage_neutron.plugins.common.extensions import nuage_router
from nuage_neutron.plugins.common import nuagedb
from nuage_neutron.plugins.common.time_tracker import TimeTracker
from nuage_neutron.plugins.common import utils as nuage_utils
from nuage_neutron.vsdclient.common.helper import get_l2_and_l3_sub_id
from oslo_config import cfg
from oslo_log.formatters import ContextFormatter
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.callbacks import resources
from neutron.db import api as db
from neutron.extensions import l3
from neutron_lib import constants as lib_constants
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import helpers
from nuage_neutron.plugins.nuage_ml2.nuage_ml2_wrapper import NuageL3Wrapper
LOG = logging.getLogger(__name__)
class NuageL3Plugin(NuageL3Wrapper):
supported_extension_aliases = ['router',
'nuage-router',
'nuage-floatingip',
'extraroute',
'ext-gw-mode']
def __init__(self):
super(NuageL3Plugin, self).__init__()
self._l2_plugin = None
self._default_np_id = None
self.init_fip_rate_log()
@property
def core_plugin(self):
if self._l2_plugin is None:
self._l2_plugin = directory.get_plugin()
return self._l2_plugin
@property
def default_np_id(self):
if self._default_np_id is None:
self._default_np_id = directory.get_plugin(
constants.NUAGE_APIS).default_np_id
return self._default_np_id
def get_plugin_type(self):
return lib_constants.L3
def get_plugin_description(self):
return "Plugin providing support for routers and floatingips."
def init_fip_rate_log(self):
self.def_fip_rate = cfg.CONF.FIPRATE.default_fip_rate
self.def_ingress_rate_kbps = (
cfg.CONF.FIPRATE.default_ingress_fip_rate_kbps)
self.def_egress_rate_kbps = (
cfg.CONF.FIPRATE.default_egress_fip_rate_kbps)
self._validate_fip_rate_value(self.def_fip_rate, 'default_fip_rate')
self._validate_fip_rate_value(self.def_ingress_rate_kbps,
'default_ingress_fip_rate_kbps',
units='kbps')
if cfg.CONF.FIPRATE.default_egress_fip_rate_kbps is not None:
self._validate_fip_rate_value(self.def_egress_rate_kbps,
'default_egress_fip_rate_kbps',
units='kbps')
self.fip_rate_log = None
if cfg.CONF.FIPRATE.fip_rate_change_log:
formatter = ContextFormatter()
formatter.conf.logging_context_format_string = (
'%(asctime)s %(levelname)s [%(user_name)s] %(message)s')
self.fip_rate_log = logging.getLogger('neutron.nuage.fip.rate')
handler = handlers.WatchedFileHandler(
cfg.CONF.FIPRATE.fip_rate_change_log)
handler.setFormatter(formatter)
self.fip_rate_log.logger.addHandler(handler)
else:
self.fip_rate_log = LOG
def _validate_fip_rate_value(self, fip_value, attribute, units='mbps'):
if fip_value < -1:
raise cfg.ConfigFileValueError(_('%s can not be < -1') % attribute)
if self.def_fip_rate > constants.MAX_VSD_INTEGER:
raise cfg.ConfigFileValueError(_('%(attr)s cannot be > %(max)s') %
{'attr': attribute,
'max': constants.MAX_VSD_INTEGER})
if units == 'kbps' and int(fip_value) != fip_value:
raise cfg.ConfigFileValueError(_('%s cannot be'
' in fraction') % attribute)
@nuage_utils.handle_nuage_api_error
@log_helpers.log_method_call
@TimeTracker.tracked
def add_router_interface(self, context, router_id, interface_info):
session = context.session
rtr_if_info = super(NuageL3Plugin, self).add_router_interface(
context, router_id, interface_info)
try:
network = self.core_plugin.get_network(context,
rtr_if_info['network_id'])
if not self.is_vxlan_network(network):
return rtr_if_info
if network['router:external']:
msg = _("Subnet in external network cannot be an interface of "
"a router.")
raise nuage_exc.NuageBadRequest(msg=msg)
return self._nuage_add_router_interface(context,
interface_info,
router_id,
rtr_if_info,
session)
except Exception:
with excutils.save_and_reraise_exception():
super(NuageL3Plugin, self).remove_router_interface(
context, router_id, interface_info)
def _nuage_add_router_interface(self, context, interface_info,
router_id, rtr_if_info, session):
if 'port_id' in interface_info:
port_id = interface_info['port_id']
port = self.core_plugin._get_port(context, port_id)
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, subnet_id)
port_params = {'neutron_port_id': port['id']}
if subnet_l2dom['nuage_l2dom_tmplt_id']:
port_params['l2dom_id'] = subnet_l2dom['nuage_subnet_id']
else:
port_params['l3dom_id'] = subnet_l2dom['nuage_subnet_id']
vport = self.vsdclient.get_nuage_vport_by_neutron_id(
port_params,
required=False)
if vport:
self.vsdclient.delete_nuage_vport(vport['ID'])
else:
subnet_id = rtr_if_info['subnet_id']
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, subnet_id)
l2domain_id = subnet_l2dom['nuage_subnet_id']
subnet = self.core_plugin.get_subnet(context, subnet_id)
vsd_zone = self.vsdclient.get_zone_by_routerid(
router_id, subnet['shared'])
self._nuage_validate_add_rtr_itf(
session, router_id, subnet, subnet_l2dom, vsd_zone)
filters = {
'fixed_ips': {'subnet_id': [subnet_id]},
'device_owner': [constants.DEVICE_OWNER_DHCP_NUAGE]
}
gw_ports = self.core_plugin.get_ports(context, filters=filters)
for port in gw_ports:
self.core_plugin.delete_port(context, port['id'])
pnet_binding = nuagedb.get_network_binding(context.session,
subnet['network_id'])
with nuage_utils.rollback() as on_exc, \
session.begin(subtransactions=True):
vsd_subnet = self.vsdclient.create_domain_subnet(
vsd_zone, subnet, pnet_binding)
|
dnalexander/CMPM146_P7
|
p7_driver.py
|
Python
|
gpl-3.0
| 2,598
| 0.051193
|
import subprocess
import json
import collections
import random
import sys
def parse_json_result(out):
"""Parse the provided JSON text and extract a dict
representing the predicates described in the first solver result."""
result = json.loads(out)
assert len(result['Call']) > 0
assert len(result['Call'][0]['Witnesses']) > 0
witness = result['Call'][0]['Witnesses'][0]['Value']
class identitydefaultdict(collections.defaultdict):
def __missing__(self, key):
return key
preds = collections.defaultdict(set)
env = identitydefaultdict()
for atom in witness:
if '(' in atom:
left = atom.index('(')
functor = atom[:left]
arg_string = atom[left:]
try:
preds[functor].add( eval(arg_string, env) )
except TypeError:
pass # at least we tried...
else:
preds[atom] = True
return dict(preds)
def solve():
gringo = subprocess.Popen("gringo level-core.lp level-style.lp level-sim.lp level-shortcuts.lp -c width=7",
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
reify = subprocess.Popen("reify",
stdin = gringo.stdout,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
clingo = subprocess.Popen("clingo - meta.lp metaD.lp metaO.lp metaS.lp --parallel-mode=4 --outf=2",
stdin = reify.stdout,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out, err = clingo.communicate()
return parse_json_result(out)
def render_ascii_dungeon(design):
"""Given a dict of predicates, return an ASCII-art depiction of the a dungeon."""
sprite = dict(design['sprite'])
param = dict(design['param'])
width = param['width']
glyph = dict(space='.', wall='W', altar='a', gem='g', trap='_')
block = ''.join([''.join([glyph[sprite.get((r,c),'space')]+' ' for c in range(width)])+'\n' for r in range(width)])
return block
def render_ascii_touch(design, target):
"""Given a dict of predicates, return an ASCII-art depiction where the player explored
while in the `target` state."""
touch = collections.defaultdict(lambda: '-')
for cell, state in design['touch']:
if state == target:
touch[cell] = str(target)
param = dict(design['param'])
width = par
|
am['width']
block = ''.join([''.join([str(touch[r,c])+' ' for c in range(width)])+'\n' for r in range(width)])
return block
def side_by_side(*blocks):
"""Horizontally merge two ASCII-art pictures."""
lines = []
fo
|
r tup in zip(*map(lambda b: b.split('\n'), blocks)):
lines.append(' '.join(tup))
return '\n'.join(lines)
def main():
map = solve()
print side_by_side(render_ascii_dungeon(map), *[render_ascii_touch(map,i) for i in range(1,4)])
main()
|
houshengbo/nova_vmware_compute_driver
|
nova/utils.py
|
Python
|
apache-2.0
| 39,324
| 0.000509
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import errno
import functools
import hashlib
import inspect
import os
import pyclbr
import random
import re
import shutil
import signal
import socket
import struct
import sys
import tempfile
import time
from xml.sax import saxutils
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
import netaddr
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
monkey_patch_opts = [
cfg.BoolOpt('monkey_patch',
default=False,
help='Whether to log monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[
'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
'nova.compute.api:nova.notifier.api.notify_decorator'
],
help='List of modules/decorators to monkey patch'),
]
utils_opts = [
cfg.IntOpt('password_length',
default=12,
help='Length of generated instance admin passwords'),
cfg.BoolOpt('disable_process_locking',
default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('instance_usage_audit_period',
default='month',
help='time period to generate instance usages for. '
'Time period must be hour, day, month or year'),
cfg.StrOpt('rootwrap_config',
default="/etc/nova/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
]
CONF = cfg.CONF
CONF.register_opts(monkey_patch_opts)
CONF.register_opts(utils_opts)
CONF.import_opt('glance_host', 'nova.config')
CONF.import_opt('glance_port', 'nova.config')
CONF.import_opt('glance_protocol', 'nova.config')
CONF.import_opt('service_down_time', 'nova.config')
LOG = logging.getLogger(__name__)
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
0 1 8 9 13 14 21 22 25
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack('!BQxxxxx', 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = '!BQxxxxxQxxxx'
if len(received) != struct.calcsize(fmt):
print struct.calcsize(fmt)
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
if identifier == 0x40 and client_sess == session_id:
return server_sess
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to execute command with optional retry.
If you add a run_as_root=True command, don't forget to add the
corresponding filter to etc/nova/rootwrap.d !
:param cmd: Passed to subprocess.Popen.
:param process_input: Send to opened process.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
exception.ProcessExecutionError unless
program exits with one of these code.
:param delay_on_retry: True | False. Defaults to True. If set to
True, wait a short amount of time
before retrying.
:param attempts: How many times to retry cmd.
:param run_as_root: True | False. Defaults to False. If set to True,
the command is run with rootwrap.
:raises exception.NovaException: on receiving unknown arguments
:raises exception.ProcessExecutionError:
:returns: a tuple, (stdout, stderr) from the spawned process, or None if
the command fails.
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwar
|
gs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
shell = kwargs.pop('shell', False)
if len(kwargs):
raise exception.NovaException(_('Got unknown keyword args '
|
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
cmd = ['sudo', 'nova-rootwrap', CONF.rootwrap_config] + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
|
gion86/awlsim
|
awlsim/core/instructions/insn_gt_d.py
|
Python
|
gpl-2.0
| 1,598
| 0.015645
|
# -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the Licen
|
se, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from
|
__future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_GT_D(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_GT_D, rawInsn)
self.assertOpCount(0)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
accu1, accu2 = self.cpu.accu1.getSignedDWord(),\
self.cpu.accu2.getSignedDWord()
if accu1 == accu2:
s.A1, s.A0, s.VKE = 0, 0, 0
elif accu1 > accu2:
s.A1, s.A0, s.VKE = 0, 1, 0
else:
s.A1, s.A0, s.VKE = 1, 0, 1
s.OV, s.OR, s.STA, s.NER = 0, 0, s.VKE, 1
|
jptomo/rpython-lang-scheme
|
rpython/jit/backend/x86/test/test_list.py
|
Python
|
mit
| 256
| 0.003906
|
from rpython.jit.metainterp.test.test_list imp
|
ort ListTests
from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
class TestList(Jit386Mixin, ListTest
|
s):
# for individual tests see
# ====> ../../../metainterp/test/test_list.py
pass
|
Conjuror/fxos-certsuite
|
mcts/utils/handlers/adb_b2g.py
|
Python
|
mpl-2.0
| 12,568
| 0.002387
|
import ConfigParser
import datetime
import os
import posixpath
import re
import shutil
import tempfile
import time
import traceback
from mozdevice import adb
from mozlog.structured import get_default_logger
here = os.path.split(__file__)[0]
class WaitTimeout(Exception):
pass
class DeviceBackup(object):
def __init__(self, backup_dirs=None, backup_files=None):
self.device = ADBB2G()
self.logger = self.device._logger
if backup_dirs is None:
backup_dirs = ["/data/local",
"/data/b2g/mozilla"]
self.backup_dirs = backup_dirs
if backup_files is None:
backup_files = ["/system/etc/hosts"]
self.backup_files = backup_files
def local_dir(self, remote):
return os.path.join(self.backup_path, remote.lstrip("/"))
def __enter__(self):
self.backup()
return self
def __exit__(self, *args, **kwargs):
self.cleanup()
def backup(self):
self.logger.info("Backing up device")
self.backup_path = tempfile.mkdtemp()
for remote_path in self.backup_dirs:
local_path = self.local_dir(remote_path)
if not os.path.exists(local_path):
os.makedirs(local_path)
self.device.pull(remote_path, local_path)
for remote_path in self.backup_files:
remote_dir, filename = remote_path.rsplit("/", 1)
local_dir = self.local_dir(remote_dir)
local_path = os.path.join(local_dir, filename)
if not os.path.exists(local_dir):
os.makedirs(local_dir)
self.device.pull(remote_path, local_path)
return self
def restore(self):
self.logger.info("Restoring device state")
self.device.remount()
for remote_path in self.backup_files:
remote_dir, filename = remote_path.rsplit("/", 1)
local_path = os.path.join(self.local_dir(remote_dir), filename)
self.device.rm(remote_path)
self.device.push(local_path, remote_path)
for remote_path in self.backup_dirs:
local_path = self.local_dir(remote_path)
self.device.rm(remote_path, recursive=True)
self.device.push(local_path, remote_path)
def cleanup(self):
shutil.rmtree(self.backup_path)
class PushFile(object):
"""Context manager that installs a file onto the device, and removes it again"""
def __init__(self, device, local, remote):
self.device = device
self.local = local
self.remote = remote
def __enter__(self, *args, **kwargs):
if self.remote.startswith("/system/"):
self.device.remount()
self.device.push(self.local, self.remote)
def __exit__(self, *args, **kwargs):
self.device.rm(self.remote)
def poll_wait(func, polling_interval=1.0, timeout=30, after_first=None):
start_time = time.time()
ran_first = False
current_time = time.time()
while current_time - start_time < timeout:
value = func()
if value:
return value
if not ran_first and after_first is not None:
after_first()
ran_first = True
sleep = max(current_time + polling_interval - time.time(), 0)
time.sleep(sleep)
current_time = time.time()
raise WaitTimeout()
class ADBB2G(adb.ADBDevice):
def __init__(self, *args, **kwargs):
if "wait_polling_interval" in kwargs:
self._wait_polling_interval = kwargs.pop("wait_polling_interval")
else:
self._wait_polling_interval = 1.0
adb.ADBDevice.__init__(self, *args, **kwargs)
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
|
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_f
|
ile))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first)
def wait_for_net(self, timeout=None, wait_polling_interval=None):
"""Wait for the device to be assigned an IP address.
:param timeout: Maximum time to wait for an IP address to be defined
:param wait_polling_interval: Interval at which to poll for ip address.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for network connection")
poll_wait(self.get_ip_address, timeout=timeout)
def stop(self, timeout=None):
self._logger.info("Stopping b2g process")
if timeout is None:
timeout = self._timeout
self.shell_bool("stop b2g")
def b2g_stopped():
processes = set(item[1].split("/")[-1] for item in self.get_process_list())
return "b2g" not in processes
poll_wait(b2g_stopped, timeout=timeout)
def start(self, wait=True, timeout=None, wait_polling_interval=None):
"""Start b2g, waiting for the adb connection to become stable.
:param wait:
:param timeout: Maximum time to wait for restart.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
self._logger.info("Starting b2g process")
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
if wait:
self.wait_for_device_ready(timeout,
after_first=lambda:self.shell_bool("start b2g",
timeout=timeout))
else:
self.shell_bool("start b2g", timeout=timeout)
def restart(self, wait=True, timeout=None, wait_polling_interval=None):
"""Restart b2g, waiting for the adb connection to become stable.
:pa
|
sunlaiqi/fundiy
|
src/shop/views.py
|
Python
|
mit
| 1,636
| 0.005501
|
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
# Create your views here.
from django.views.generic import ListView, DetailView
from .models import Category, Product
from cart.forms import CartAddProductForm
def category_list(request):
return render(request, "shop/category_list.html",
{'nodes': Category.objects.all()})
'''
class CategoryList(ListView):
model = Category
template_name = "category_list.html"
'''
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
return render(request, "shop/product_list.html",
{'category': category,
'nodes': categories,
'products': products,})
'''
class ProductList(ListView):
model = DesignProduct
template_name = "shop/product_list.html"
'''
def product_detail(request, id, slug):
categories = Category.objects.al
|
l()
product = get_object_or_404(Product,
id=id,
slug=slug,
available=True)
cart_product_form = CartAddProductForm()
return render(request,
'shop/product_det
|
ail.html',
{'product': product,
'nodes': categories,
'cart_product_form': cart_product_form})
|
edx/credentials
|
credentials/wsgi.py
|
Python
|
agpl-3.0
| 559
| 0
|
"""
WSGI config for credentials.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1
|
.8/howto/deployment/wsgi/
"""
import os
from os.path import abspath, dirname
from sys import path
from dja
|
ngo.core.wsgi import get_wsgi_application
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "credentials.settings.local")
application = get_wsgi_application() # pylint: disable=invalid-name
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
Python
|
apache-2.0
| 17,835
| 0.000729
|
"""
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
"""
# pylint: disable=function-redefined
import logging
import pprint
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.exceptions import SaltCloudSystemExit
from salt.utils.functools import namespaced_function
from salt.utils.versions import LooseVersion as _LooseVersion
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion("1.4.0"):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append("/etc/ssl/certs/YaST-CA.pem")
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
__virtualname__ = "cloudstack"
# Only load in this module if the CLOUDSTACK configurations are
|
in place
def __virtual__():
"""
Set up the libcloud functions and check for CloudStack configurations.
"""
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
|
return __virtualname__
def _get_active_provider_name():
try:
return __active_provider_name__.value()
except AttributeError:
return __active_provider_name__
def get_configured_provider():
"""
Return the first configured instance.
"""
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("apikey", "secretkey", "host", "path"),
)
def get_dependencies():
"""
Warn if dependencies aren't met.
"""
return config.check_driver_dependencies(__virtualname__, {"libcloud": HAS_LIBS})
def get_conn():
"""
Return a conn object for the passed VM data
"""
driver = get_driver(Provider.CLOUDSTACK)
verify_ssl_cert = config.get_cloud_config_value(
"verify_ssl_cert",
get_configured_provider(),
__opts__,
default=True,
search_global=False,
)
if verify_ssl_cert is False:
try:
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = False
except (ImportError, AttributeError):
raise SaltCloudSystemExit(
"Could not disable SSL certificate verification. Not loading module."
)
return driver(
key=config.get_cloud_config_value(
"apikey", get_configured_provider(), __opts__, search_global=False
),
secret=config.get_cloud_config_value(
"secretkey", get_configured_provider(), __opts__, search_global=False
),
secure=config.get_cloud_config_value(
"secure",
get_configured_provider(),
__opts__,
default=True,
search_global=False,
),
host=config.get_cloud_config_value(
"host", get_configured_provider(), __opts__, search_global=False
),
path=config.get_cloud_config_value(
"path", get_configured_provider(), __opts__, search_global=False
),
port=config.get_cloud_config_value(
"port",
get_configured_provider(),
__opts__,
default=None,
search_global=False,
),
)
def get_location(conn, vm_):
"""
Return the node location to use
"""
locations = conn.list_locations()
# Default to Dallas if not otherwise set
loc = config.get_cloud_config_value("location", vm_, __opts__, default=2)
for location in locations:
if str(loc) in (str(location.id), str(location.name)):
return location
def get_security_groups(conn, vm_):
"""
Return a list of security groups to use, defaulting to ['default']
"""
securitygroup_enabled = config.get_cloud_config_value(
"securitygroup_enabled", vm_, __opts__, default=True
)
if securitygroup_enabled:
return config.get_cloud_config_value(
"securitygroup", vm_, __opts__, default=["default"]
)
else:
return False
def get_password(vm_):
"""
Return the password to use
"""
return config.get_cloud_config_value(
"password",
vm_,
__opts__,
default=config.get_cloud_config_value(
"passwd", vm_, __opts__, search_global=False
),
search_global=False,
)
def get_key():
"""
Returns the ssh private key for VM access
"""
return config.get_cloud_config_value(
"private_key", get_configured_provider(), __opts__, search_global=False
)
def get_keypair(vm_):
"""
Return the keypair to use
"""
keypair = config.get_cloud_config_value("keypair", vm_, __opts__)
if keypair:
return keypair
else:
return False
def get_ip(data):
"""
Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one.
"""
try:
ip = data.public_ips[0]
except Exception: # pylint: disable=broad-except
ip = data.private_ips[0]
return ip
def get_networkid(vm_):
"""
Return the networkid to use, only valid for Advanced Zone
"""
networkid = config.get_cloud_config_value("networkid", vm_, __opts__)
if networkid is not None:
return networkid
else:
return False
def get_project(conn, vm_):
"""
Return the project to use.
"""
try:
projects = conn.ex_list_projects()
except AttributeError:
# with versions <0.15 of libcloud this is causing an AttributeError.
log.warning(
"Cannot get projects, you may need to update libcloud to 0.15 or later"
)
return False
projid = config.get_cloud_config_value("projectid", vm_, __opts__)
if not projid:
return False
for project in projects:
if str(projid) in (str(project.id), str(project.name)):
return project
log.warning("Couldn't find project %s in projects", projid)
return False
def create(vm_):
"""
Create a single VM from a data dict
"""
try:
# Check for required profile parameters before sending any API calls.
if (
vm_["profile"]
and config.is_profile_configured(
__opts__,
_get_active_provider_name() or "cloudstack",
|
lot9s/pathfinder-rpg-utils
|
data-mining/bestiary/db/creatureDB.py
|
Python
|
mit
| 6,403
| 0.00531
|
'''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object):
'''Class for storing Creature objects in a SQLite database.'''
def __init__(self, name='creature.db', use_nominal_cr=Fa
|
lse):
self.min_cr = 0.0
self.max_cr = float('inf')
# set flags
self.using_nominal_cr = use_nominal_cr
# initialize database
self.connection = sqlite3.connect(name)
self.connection.text_factory = str
self._create_table()
def _construct_table_columns(self):
'''Constructs a tuple that defines
|
the columns in
the "creatures" table
:returns tuple that defines the columns in "creatures" table
'''
columns = ('id integer primary key autoincrement',
'name varchar(45)')
# set type of CR column depending on flag
if self.using_nominal_cr:
columns = columns + ('CR varchar(10)',)
else:
columns = columns + ('CR real',)
# add the remaining database fields to column tuple
main_entry_columns = (
'hp integer', 'HD integer',
'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
'Fort integer', 'Ref integer', 'Will integer',
'Str integer', 'Dex integer', 'Con integer',
'Int integer', 'Wis integer', 'Cha integer',
'BAB integer', 'CMB integer', 'CMD integer'
)
columns = columns + main_entry_columns
return columns
def _construct_tuple_insert_values(self, creature):
'''Constructs a tuple of Creature values for insertion into
the "creatures" table
:returns tuple of values for insertion into "creatures" table
'''
values = (creature.name,)
# set value of CR column depending on flag
if self.using_nominal_cr:
values = values + ('CR ' + creature.cr,)
else:
values = values + (creature.cr,)
# add the remaining database fields to values tuple
main_entry_values = (
creature.hp,
creature.hd,
creature.ac['AC'],
creature.ac['touch'],
creature.ac['flat-footed'],
creature.saves['Fort'],
creature.saves['Ref'],
creature.saves['Will'],
creature.ability_scores['Str'],
creature.ability_scores['Dex'],
creature.ability_scores['Con'],
creature.ability_scores['Int'],
creature.ability_scores['Wis'],
creature.ability_scores['Cha'],
creature.bab,
creature.cmb,
creature.cmd
)
values = values + main_entry_values
return values
def _create_table(self):
'''Creates a SQLite table with the given name for storing
Creature objects if it does not already exist
:param name: a string value for the name of the table
'''
# create table
columns = self._construct_table_columns()
query = '''create table if not exists creatures
(
%s,%s,
%s,%s,
%s,%s,%s,
%s,%s,%s,
%s,%s,%s,%s,%s,%s,%s,
%s, %s, %s
)''' % columns
self.connection.execute(query)
def add_creature(self, creature):
'''Adds a Creature object as a row in the appropriate table
of the SQLite database
:param creature: a Creature object to be added to the database
'''
# check that creature CR is within desired range
creature_cr = float(creature.cr)
if creature_cr < self.min_cr or creature_cr > self.max_cr:
return
# ignore duplicate creatures
if self.is_creature_in_db(creature):
return
# insert creature into database
values = self._construct_tuple_insert_values(creature)
query = '''insert into creatures
(
name,CR,
hp,HD,
ac,touch_ac,flatfooted_ac,
Fort, Ref, Will,
Str,Dex,Con,Int,Wis,Cha,
BAB,CMB,CMD
)
values
(
?,?,
?,?,
?,?,?,
?,?,?,
?,?,?,?,?,?,
?,?,?
)'''
self.connection.execute(query, values)
def commit_and_close(self):
'''Commits any uncommitted changes to the SQLite database and
closes the connection
'''
self.connection.commit()
self.connection.close()
def export_as_csv(self, file_name='creature.csv'):
'''Exports the data in this object as a .csv file.
:param file_name: the name of the output csv file
'''
cursor = self.connection.cursor()
data = cursor.execute('select * from creatures')
# write data to output file
csv_file = open(file_name, 'w')
writer = csv.writer(csv_file)
writer.writerow([
'id',
'name', 'CR',
'hp', 'HD',
'ac', 'touch_ac', 'flatfooted_ac',
'Fort', 'Ref', 'Will',
'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
'BAB', 'CMB', 'CMD'
])
writer.writerows(data)
csv_file.close()
def is_creature_in_db(self, creature):
''' Determines whether a database entry exists for a
given creature
:returns True if entry exists, False otherwise
'''
# set value of CR column depending on flag
creature_cr = creature.cr
if self.using_nominal_cr:
creature_cr = 'CR ' + creature.cr
# query database for creature
values = (creature.name, creature_cr)
query = '''select * from creatures where name=? and cr=?'''
cursor = self.connection.cursor()
cursor.execute(query, values)
return cursor.fetchone() is not None
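# --- usage sketch (editor's addition, not part of the original file) ---
# Assumes a hypothetical Creature-like object exposing the attributes read by
# _construct_tuple_insert_values, and the module's own sqlite3/csv imports from
# its (truncated) header; types.SimpleNamespace is enough for a demo.
if __name__ == '__main__':
    from types import SimpleNamespace
    goblin = SimpleNamespace(
        name='Goblin', cr='2', hp=6, hd=1,
        ac={'AC': 16, 'touch': 13, 'flat-footed': 14},
        saves={'Fort': 3, 'Ref': 3, 'Will': -1},
        ability_scores={'Str': 11, 'Dex': 15, 'Con': 12,
                        'Int': 10, 'Wis': 9, 'Cha': 6},
        bab=1, cmb=0, cmd=12)
    db = CreatureDB(name='creature.db', use_nominal_cr=True)
    db.add_creature(goblin)
    db.export_as_csv('creature.csv')
    db.commit_and_close()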
|
karllessard/tensorflow
|
tensorflow/python/keras/feature_column/sequence_feature_column_test.py
|
Python
|
apache-2.0
| 28,269
| 0.003007
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import combinations
from tensorflow.python.keras.feature_column import sequence_feature_column as ksfc
from tensorflow.python.keras.saving import model_config
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
def _initialized_session(config=None):
sess = session.Session(config=config)
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 2, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
# feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
'expected_sequence_length': [2, 2]},
)
def test_embedding_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
embedding_dimension_b = 3
embedding_values_b = (
(11., 12., 13.), # id 0
(14., 15., 16.), # id 1
(17., 18., 19.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a,
dimension=embedding_dimension_a,
initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_b = fc.embedding_column(
categorical_column_b,
dimension=embedding_dimension_b,
initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
# Test that columns are reordered alphabetically.
sequence_input_layer = ksfc.SequenceFeatures(
[embedding_column_b, embedding_column_a])
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b,})
self.evaluate(variables_lib.global_variables_initializer())
weights = sequence_input_layer.weights
self.assertCountEqual(
('sequence_features/aaa_embedding/embedding_weights:0',
'sequence_features/bbb_embedding/embedding_weights:0'),
tuple([v.name for v in weights]))
self.assertAllEqual(embedding_values_a, self.evaluate(weights[0]))
self.assertAllEqual(embedding_values_b, self.evaluate(weights[1]))
self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
def test_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a, dimension=2)
sequence_input_layer = ksfc.SequenceFeatures([embedding_column_a])
with self.assertRaisesRegex(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must be of '
r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
_, _ = sequence_input_layer({'aaa': sparse_input})
def test_shared_embedding_column(self):
with ops.Graph().as_default():
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [2, 0]
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 0),
dense_shape=(2, 2))
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
|
memo/tensorflow
|
tensorflow/python/layers/core_test.py
|
Python
|
apache-2.0
| 14,077
| 0.006607
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(dense.units, 2)
self.assertEqual(dense.activation, nn_ops.relu)
self.assertEqual(dense.kernel_regularizer, None)
self.assertEqual(dense.bias_regularizer, None)
self.assertEqual(dense.activity_regularizer, None)
self.assertEqual(dense.use_bias, True)
# Test auto-naming
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_1')
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_2')
def testCall(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables, [])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias.name, 'my_dense/bias:0')
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = random_ops.random_uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
def testCallOnPlaceHolder(self):
inputs = array_ops.placeholder(dtype=dtypes.float32)
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.op.name, 'dense1/Relu')
dense = core_layers.Dense(2, name='dense2')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
def testActivityRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', activity_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', kernel_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testFunctionalDense(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
self.assertEqual(outputs.get_shape().as_list(), [5, 2])
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2)
vars2 = variables.trainable_variables()
self.assertEqual(len(vars1), 2)
s
|
snicoper/snicoper.com
|
tests/unit/base_test.py
|
Python
|
mit
| 2,024
| 0
|
import json
import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
UserModel = get_user_model()
class BaseTestCase(TestCase):
"""Utilities for all tests related to the site.
Includes fixtures for the models and properties for the
most frequently used models, such as:
user_model
anuncio_model
test_settings from django.conf.settings
"""
fixtures = [
'accounts.json',
'sites.json'
]
def setUp(self):
"""Helpers for the tests.
Attributes:
user_model (User): The User model.
settings: LazySettings()
user (User): User with pk=1
"""
super().setUp()
self.user_model = UserModel
self.test_settings = settings
self.user = self.user_model.objects.get(pk=1)
def login(self, username=None, password=None):
"""User login.
If username and password are not given, self.user.username
and 123 are used by default.
Args:
username (str): Username.
password (str): User password.
Returns:
bool: True if the login succeeds, False otherwise.
"""
username = self.user.username if username is None else username
password = '123' if password is None else password
return self.client.login(username=username, password=password)
def logout(self):
self.client.logout()
def load_data(self, path_data):
"""Read data from a .json file.
Args:
path_data (str): path of the file to read.
Returns:
dict: Dictionary with the json data.
Raises:
FileNotFoundError: If the .json file does not exist at the given path.
"""
if not os.path.exists(path_data):
raise FileNotFoundError
with open(path_data, 'r') as fh:
data = json.load(fh)
return data
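# Illustrative subclass (editor's sketch, not part of the original file): shows
# how the fixtures and helpers above are typically consumed. The 'data.json'
# path is a placeholder assumption.
class ExampleViewTestCase(BaseTestCase):
    def test_login_and_load_data(self):
        self.assertTrue(self.login())
        data = self.load_data(os.path.join(os.path.dirname(__file__), 'data.json'))
        self.assertIsInstance(data, dict)
        self.logout()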
|
its-lab/MoniTutor-Tunnel
|
start_couchDB_resultwriter.py
|
Python
|
gpl-3.0
| 2,457
| 0.002849
|
import argparse
import logging
import signal
import time
from server.couchDB_resultwriter import CouchDbResultWriter as ResultWriter
import sys
import os
from utils import daemonize
from utils import get_logger
from utils import configure_logging
parser = argparse.ArgumentParser(description="MoniTunnel server")
parser.add_argument("-a", "--rabbit-mq-host", default="localhost", help="Address of the rabbit-mq server")
parser.add_argument("-v", "--verbose", action="count", help="Increase verbosity. -vvvvv == DEBUG")
parser.add_argument("-l", "--logging", action="store_true", help="Write messages to syslog instead of stdout. Increase verbosity of logs with -v")
parser.add_argument("-t", "--task-exchange", default="task_exchange", help="Name of the task exchange")
parser.add_argument("-r", "--result-exchange", default="result_exchange", help="Name of the result exchange")
parser.add_argument("-d", "--daemonize", action="store_true", help="Start as daemon")
parser.add_argument("-i", "--couch-db-url", default="http://couchdb:5984", help="CouchDB server API url")
parser.add_argument("-u", "--couch-db-user")
parser.add_argument("-p", "--couch-db-password")
config = vars(parser.parse_args())
result_writer = ResultWriter(config["rabbit_mq_host"],
config["result_exchange"],
config["task_exchange"],
config["couch_db_url"],
couch_db_user=config["couch_db_user"],
couch_db_password=config["couch_db_password"])
logger = get_logger(config["verbose"])
configure_logging(logger, config["logging"])
def signal_handler(signum, frame):
logging.warn("SIGNAL " + str(signum) + " received! Frame: " + str(frame))
logging.debug("Stop ResultWriter thread")
result_writer.stop()
logging.debug("Wait for ResultWriter thread to join")
result_writer.join()
logging.debug("ResultWriter thread joined")
if config["daemonize"]:
os.remove("/var/run/monitunnel.pid")
sys.exit(0)
if "__main__" == __name__:
if config["daemonize"]:
daemonize()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGALRM, signal_handler)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
logging.debug("Start ResultWriter Thread")
result_writer.start()
run = True
while run:
time.sleep(1)
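# Example invocation (editor's addition; host, credentials and exchange names
# are placeholders, not values from the original project):
#   python start_couchDB_resultwriter.py -a rabbitmq.example.org \
#       -i http://couchdb:5984 -u admin -p secret -vv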
|
prefetchnta/questlab
|
bin/x64bin/python/37/Lib/tracemalloc.py
|
Python
|
lgpl-2.1
| 17,610
| 0.000227
|
from collections.abc import Sequence, Iterable
from functools import total_ordering
import fnmatch
import linecache
import os.path
import pickle
# Import types and functions implemented in C
from _tracemalloc import *
from _tracemalloc import _get_object_traceback, _get_traces
def _format_size(size, sign):
for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
if abs(size) < 100 and unit != 'B':
# 3 digits (xx.x UNIT)
if sign:
return "%+.1f %s" % (size, unit)
else:
return "%.1f %s" % (size, unit)
if abs(size) < 10 * 1024 or unit == 'TiB':
# 4 or 5 digits (xxxx UNIT)
if sign:
return "%+.0f %s" % (size, unit)
else:
return "%.0f %s" % (size, unit)
size /= 1024
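# Worked examples for the thresholds above (editor's addition, not part of the
# original module):
#   _format_size(512, False)    -> '512 B'     (below 10 KiB: whole bytes)
#   _format_size(50000, False)  -> '48.8 KiB'  (xx.x once a larger unit fits)
#   _format_size(-2048, True)   -> '-2048 B'   (sign=True keeps the sign)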
class Statistic:
"""
Statistic difference on memory allocations between two Snapshot instance.
"""
__slots__ = ('traceback', 'size', 'count')
def __init__(self, traceback, size, count):
self.traceback = traceback
self.size = size
self.count = count
def __hash__(self):
return hash((self.traceback, self.size, self.count))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.count == other.count)
def __str__(self):
text = ("%s: size=%s, count=%i"
% (self.traceback,
_format_size(self.size, False),
self.count))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<Statistic traceback=%r size=%i count=%i>'
% (self.traceback, self.size, self.count))
def _sort_key(self):
return (self.size, self.count, self.traceback)
class StatisticDiff:
"""
Statistic difference on memory allocations between an old and a new
Snapshot instance.
"""
__slots__ = ('traceback', 'size', 'size_diff', 'count', 'count_diff')
def __init__(self, traceback, size, size_diff, count, count_diff):
self.traceback = traceback
self.size = size
self.size_diff = size_diff
self.count = count
self.count_diff = count_diff
def __hash__(self):
return hash((self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.size_diff == other.size_diff
and self.count == other.count
and self.count_diff == other.count_diff)
def __str__(self):
text = ("%s: size=%s (%s), count=%i (%+i)"
% (self.traceback,
_format_size(self.size, False),
_format_size(self.size_diff, True),
self.count,
self.count_diff))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<StatisticDiff traceback=%r size=%i (%+i) count=%i (%+i)>'
% (self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def _sort_key(self):
return (abs(self.size_diff), self.size,
abs(self.count_diff), self.count,
self.traceback)
def _compare_grouped_stats(old_group, new_group):
statistics = []
for traceback, stat in new_group.items():
previous = old_group.pop(traceback, None)
if previous is not None:
stat = StatisticDiff(traceback,
stat.size, stat.size - previous.size,
stat.count, stat.count - previous.count)
else:
stat = StatisticDiff(traceback,
stat.size, stat.size,
stat.count, stat.count)
statistics.append(stat)
for traceback, stat in old_group.items():
stat = StatisticDiff(traceback, 0, -stat.size, 0, -stat.count)
statistics.append(stat)
return statistics
@total_ordering
class Frame:
"""
Frame of a traceback.
"""
__slots__ = ("_frame",)
def __init__(self, frame):
# frame is a tuple: (filename: str, lineno: int)
self._frame = frame
@property
def filename(self):
return self._frame[0]
@property
def lineno(self):
return self._frame[1]
def __eq__(self, other):
return (self._frame == other._frame)
def __lt__(self, other):
return (self._frame < other._frame)
def __hash__(self):
return hash(self._frame)
def __str__(self):
return "%s:%s" % (self.filename, self.lineno)
def __repr__(self):
return "<Frame filename=%r lineno=%r>" % (self.filename, self.lineno)
@total_ordering
class Traceback(Sequence):
"""
Sequence of Frame instances sorted from the oldest frame
to the most recent frame.
"""
__slots__ = ("_frames",)
def __init__(self, frames):
Sequence.__init__(self)
# frames is a tuple of frame tuples: see Frame constructor for the
# format of a frame tuple; it is reversed, because _tracemalloc
# returns frames sorted from most recent to oldest, but the
# Python API expects oldest to most recent
self._frames = tuple(reversed(frames))
def __len__(self):
return len(self._frames)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Frame(trace) for trace in self._frames[index])
else:
return Frame(self._frames[index])
def __contains__(self, frame):
return frame._frame in self._frames
def __hash__(self):
return hash(self._frames)
def __eq__(self, other):
return (self._frames == other._frames)
def __lt__(self, other):
return (self._frames < other._frames)
def __str__(self):
return str(self[0])
def __repr__(self):
return "<Traceback %r>" % (tuple(self),)
def format(self, limit=None, most_recent_first=False):
lines = []
if limit is not None:
if limit > 0:
frame_slice = self[-limit:]
else:
frame_slice = self[:limit]
else:
frame_slice = self
if most_recent_first:
frame_slice = reversed(frame_slice)
for frame in frame_slice:
lines.append(' File "%s", line %s'
% (frame.filename, frame.lineno))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
lines.append(' %s' % line)
return lines
def get_object_traceback(obj):
"""
Get the traceback where the Python object *obj* was allocated.
Return a Traceback instance.
Return None if the tracemalloc module is not tracing memory allocations or
did not trace the allocation of the object.
"""
frames = _get_object_traceback(obj)
if frames is not None:
return Traceback(frames)
else:
return None
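# Typical use (editor's sketch, not part of the original module): tracing must
# be started before the allocation of interest, otherwise None is returned.
#   import tracemalloc
#   tracemalloc.start(25)          # keep up to 25 frames per traceback
#   data = [dict() for _ in range(100)]
#   tb = tracemalloc.get_object_traceback(data)
#   if tb is not None:
#       print('\n'.join(tb.format(limit=5)))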
class Trace:
"""
Trace of a memory block.
"""
__slots__ = ("_trace",)
def __init__(self, trace):
# trace is a tuple: (domain: int, size: int, traceback: tuple).
# See Traceback constructor for the format of the traceback tuple.
self._trace = trace
@property
def domain(self):
return self._trace[0]
@property
def size(self):
return self._trace[1]
@property
def traceback(self):
return
|
mmagnus/rna-pdb-tools
|
rna_tools/tools/rna_filter/rna_get_dists.py
|
Python
|
gpl-3.0
| 9,035
| 0.006419
|
#!/usr/bin/env python
"""rna_filter.py - calculate distances based on given restraints on PDB files or SimRNA trajectories.
The format of restraints::
(d:A1-A2 < 10.0 1) = if the distance between A1 and A2 is lower than 10.0, score it with 1
Usage::
$ python rna_filter.py -r test_data/restraints.txt -s test_data/CG.pdb
d:A1-A2 10.0 measured: 6.58677550096 [x]
test_data/CG.pdb 1.0 1 out of 1
# $ python rna_filter.py -r test_data/restraints.txt -t test_data/CG.trafl
(d:A1-A2 < 10.0 1)|(d:A2-A1 <= 10 1)
restraints [('A1', 'A2', '<', '10.0', '1'), ('A2', 'A1', '<=', '10', '1')]
Frame #1 e:1252.26
mb for A1 [ 54.729 28.9375 41.421 ]
mb for A2 [ 55.3425 35.3605 42.7455]
d:A1-A2 6.58677550096
mb for A2 [ 55.3425 35.3605 42.7455]
mb for A1 [ 54.729 28.9375 41.421 ]
d:A2-A1 6.58677550096
# this ^ is off right now
"""
from __future__ import print_function
import logging
from rna_tools.rna_tools_logging import logger
from rna_tools.tools.rna_calc_rmsd.lib.rmsd.calculate_rmsd import get_coordinates
from rna_tools.tools.extra_functions.select_fragment import select_pdb_fragment_pymol_style, select_pdb_fragment
from rna_tools.tools.simrna_trajectory.simrna_trajectory import SimRNATrajectory
import argparse
import re
import numpy as np
logger.setLevel(logging.DEBUG)
logger.propagate = False
class RNAFilterErrorInRestraints(Exception):
pass
def parse_logic(restraints_fn, verbose):
"""Parse logic of restraints.
Args:
restraints_fn (string): path to a file with restraints in the right format (see below)
verbose (bool) : be verbose?
Format::
# ignore comments
(d:A1-A2 < 10.0 1)|(d:A2-A1 <= 10 1)
Returns:
list: parse restraints into a list of lists, e.g. [('A9', 'A41', '10.0', '1'), ('A10', 'A16', '10', '1')]
"""
txt = ''
with open(restraints_fn) as f:
for l in f:
if not l.startswith('#'):
txt += l.strip()
if verbose:
logger.info(txt)
restraints = re.findall(
'\(d:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\<|\<\=)\s*(?P<distance>[\d\.]+)\s+(?P<weight>.+?)\)', txt)
return restraints
def parse_logic_newlines(restraints_fn, offset=0, verbose=False):
"""Parse logic of restraints.
Args:
restraints_fn (string): path to a file with restraints in the right format (see below)
verbose (bool) : be verbose?
Format::
# ignore comments
d:Y23-Y69 < 25.0
d:Y22-Y69 < 25.0
# d:<chain><resi_A>-<resi_B> <operator> <distance> <weight>; each restraints in a new line
Raises:
__main__.RNAFilterErrorInRestraints: Please check the format of your restraints!
Returns:
list: parse restraints into a list of lists, e.g. [('A9', 'A41', '10.0', '1'), ('A10', 'A16', '10', '1')]
"""
restraints = []
with open(restraints_fn) as f:
for l in f:
if l.strip():
if not l.startswith('#'):
if verbose:
logger.info(l)
restraint = re.findall(
'd:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\<|\<\=)\s*(?P<distance>[\d\.]+)\s+(?P<weight>.+?)', l)
if restraint:
# without [0] it is restraints [[('Y23', 'Y69', '<', '25.0', '1')], [('Y22', 'Y69', '<', '25.0', '1')]]
# why? to convert 'Y23', 'Y69', '<', '25.0', '1' -> 'Y23', 'Y69', '<', 25.0, 1
start = restraint[0][0][0] + str(int(restraint[0][0][1:]) + offset)
end = restraint[0][1][0] + str(int(restraint[0][1][1:]) + offset)
restraints.append([start, end, restraint[0][1], restraint[0][2],
float(restraint[0][3]), float(restraint[0][4])])
if len(restraints) == 0:
raise RNAFilterErrorInRestraints('Please check the format of your restraints!')
return restraints # [('A9', 'A41', '10.0', '1'), ('A10', 'A16', '10', '1')]
def get_distance(a, b):
diff = a - b
return np.sqrt(np.dot(diff, diff))
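# Quick numeric check (editor's addition): the Euclidean distance between two
# 3-D points, e.g. get_distance(np.array([0., 0., 0.]), np.array([3., 4., 0.])) -> 5.0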
def parse_pdb(pdb_fn, selection):
"""
{'A9': {'OP1': array([ 53.031, 21.908, 40.226]), 'C6': array([ 54.594, 27.595, 41.069]), 'OP2': array([ 52.811, 24.217, 39.125]), 'N4': array([ 53.925, 30.861, 39.743]), "C1'": array([ 55.611, 26.965, 43.258]), "C3'": array([ 53.904, 25.437, 43.809]), "O5'": array([ 53.796, 24.036, 41.353]), 'C5': array([ 54.171, 28.532, 40.195]), "O4'": array([ 55.841, 25.746, 42.605]), "C5'": array([ 54.814, 23.605, 42.274]), 'P': array(
[ 53.57 , 23.268, 39.971]), "C4'": array([ 55.119, 24.697, 43.283]), "C2'": array([ 54.563, 26.706, 44.341]), 'N1': array([ 55.145, 27.966, 42.27 ]), "O2'": array([ 55.208, 26.577, 45.588]), 'N3': array([ 54.831, 30.285, 41.747]), 'O2': array([ 55.76 , 29.587, 43.719]), 'C2': array([ 55.258, 29.321, 42.618]), "O3'": array([ 53.272, 24.698, 44.789]), 'C4': array([ 54.313, 29.909, 40.572])}}
"""
V = {}
with open(pdb_fn) as f:
for line in f:
if line.startswith("ATOM"):
curr_chain_id = line[21]
curr_resi = int(line[22: 26])
curr_atom_name = line[12: 16].strip()
if selection:
if curr_chain_id in selection:
if curr_resi in selection[curr_chain_id]:
x = line[30: 38]
y = line[38: 46]
z = line[46: 54]
# V.append(np.asarray([x,y,z],dtype=float))
if curr_chain_id + str(curr_resi) in V:
V[curr_chain_id +
str(curr_resi)][curr_atom_name] = np.asarray([x, y, z], dtype=float)
else:
V[curr_chain_id + str(curr_resi)] = {}
V[curr_chain_id +
str(curr_resi)][curr_atom_name] = np.asarray([x, y, z], dtype=float)
return V
def check_condition(condition, wight):
"""return True/False, score"""
pass
def get_residues(pdb_fn, restraints, verbose):
residues = set()
for h in restraints:
a = h[0]
b = h[1]
a = a[0] + ':' + a[1:]
residues.add(a) # A19
b = b[0] + ':' + b[1:]
residues.add(b)
# set(['A:41', 'A:9', 'A:10', 'A:16'])
selection = ','.join(residues)
selection_parsed = select_pdb_fragment(selection, separator=",", splitting="[,:;]")
residues = parse_pdb(pdb_fn, selection_parsed)
# get mb
for r in residues:
if 'N9' in residues[r]: # A,G
residues[r]['mb'] = residues[r]['N9'] - ((residues[r]['N9'] - residues[r]['C6']) / 2)
else: # C,U
residues[r]['mb'] = residues[r]['N1'] - ((residues[r]['N1'] - residues[r]['C4']) / 2)
for r in residues:
if verbose:
logger.info(' '.join(['mb for ', str(r), str(residues[r]['mb'])]))
return residues
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-r', "--restraints_fn",
dest="restraints_fn",
required=True,
help="""restraints_fn:
Format:
(d:A9-A41 < 10.0 1)|(d:A41-A9 <= 10 1)
""")
parser.add_argument("-v", "--verbose",
action="store_true", help="be verbose")
parser.add_argument('-s', dest="structures", help='structures',
nargs='+') # , type=string)
parser.add_argument(
'--offset', help='use offset to adjust your restraints to numbering in PDB files, ade (1y26)'
'pdb starts with 13, so offset is -12)', default=0, type=int)
parser.add_argument('-t', dest="trajectory", help="SimRNA trajectory")
return parser
def calc_dists_for_pdbs(pdb_files, pairs, verbose):
"""
"
|
backtrace-labs/backtrace-python
|
tests/__init__.py
|
Python
|
mit
| 3,903
| 0.003587
|
import simplejson as json
import os
import subprocess
import sys
import unittest
if sys.version_info.major >= 3:
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
else:
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
tests_dir = os.path.dirname(os.path.realpath(__file__))
exe_dir = os.path.join(tests_dir, "exe")
debug_backtrace = False
def check_basic_report(obj):
assert obj['lang'] == "python"
assert obj['agent'] == "backtrace-python"
assert obj['classifiers'][0] == "NameError"
if obj['langVersion'].startswith("PyPy"):
assert obj['attributes']['error.message'] == "global name 'b' is not defined"
else:
assert obj['attributes']['error.message'] == "name 'b' is not defined"
source_code_id = obj['threads'][obj['mainThread']]['stack'][0]['sourceCode']
assert obj['sourceCode'][source_code_id]['path'].endswith("tests/exe/simple_report.py")
assert obj['sourceCode'][source_code_id]['text'].endswith("\na = b\n")
assert obj['attributes']['a'] == 1
assert obj['attributes']['b'] == "bar"
def check_multi_file(obj):
if sys.version_info.major >= 3:
assert obj['classifiers'][0] == "JSONDecodeError"
assert obj['attributes']['error.message'] == "Expecting value: line 1 column 1 (char 0)"
elif obj['langVersion'].startswith("PyPy"):
assert obj['classifiers'][0] == "ValueError"
assert obj['attributes']['error.message'] == "Error when decoding true at char 1"
else:
assert obj['classifiers'][0] == "ValueError"
assert obj['attributes']['error.message'] == "No JSON object could be decoded"
fault_stack = obj['threads'][obj['mainThread']]['stack']
source_code_id = fault_stack[-1]['sourceCode']
assert obj['sourceCode'][source_code_id]['path'].endswith("tests/exe/multi_file.py")
lines = obj['sourceCode'][source_code_id]['text'].split("\n")
assert lines[fault_stack[-1]['line'] - 1] == 'call_a_file(True)'
assert fault_stack[-6]['funcName'] == "bar"
assert fault_stack[-6]['line'] == 4
def check_send_report(obj):
if sys.version_info.major >= 3:
assert obj['attributes']['error.message'] == "dsa"
assert obj['attributes']['genre'] == 'happy hardcore'
assert obj['annotations']['color'] == 'blue'
def check_threads(obj):
if sys.version_info.major >= 3:
assert len(obj['threads']) == 4
def run_one_test(check_fn, exe_name):
requested_server_address = ("127.0.0.1", 0)
class non_local:
json_object = None
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.send_response(200)
self.end_headers()
payload = self.rfile.read()
json_string = payload.decode('utf-8', 'strict')
non_local.json_object = json.loads(json_string)
def log_message(self, format, *args):
pass
httpd = HTTPServer(requested_server_address, RequestHandler)
host, port = httpd.server_address
exe_path = os.path.join(exe_dir, exe_name)
stdio_action = None if debug_backtrace else subprocess.PIPE
child = subprocess.Popen([sys.executable, exe_path, host, str(port)],
stdout=stdio_action, stderr=stdio_action)
httpd.handle_request()
check_fn(non_local.json_object)
child.wait()
if stdio_action is not None:
child.stdout.close()
child.stderr.close()
httpd.server_close()
class TestErrorReports(unittest.TestCase):
def test_basic_report(self):
run_one_test(check_basic_report, "simple_report.py")
def test_multi_file(self):
run_one_test(check_multi_file, "multi_file.py")
def test_send_report(self):
run_one_test(check_send_report, "send_report.py")
def test_threads(self):
run_one_test(check_threads, "threads.py")
|
abhishek-ch/hue
|
desktop/core/src/desktop/conf.py
|
Python
|
apache-2.0
| 37,361
| 0.009716
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import socket
import stat
import subprocess
from django.utils.translation import ugettext_lazy as _
from desktop.redaction.engine import parse_redaction_policy_from_file
from desktop.lib.conf import Config, ConfigSection, UnspecifiedConfigSection,\
coerce_bool, coerce_csv, coerce_json_dict,\
validate_path, list_of_compiled_res, coerce_str_lowercase
from desktop.lib.i18n import force_unicode
from desktop.lib.paths import get_desktop_root
def coerce_database(database):
if database == 'mysql':
return 'django.db.backends.mysql'
elif database == 'postgres' or database == 'postgresql_psycopg2':
return 'django.db.backends.postgresql_psycopg2'
elif database == 'oracle':
return 'django.db.backends.oracle'
elif database in ('sqlite', 'sqlite3'):
return 'django.db.backends.sqlite3'
else:
return str(database)
def coerce_port(port):
port = int(port)
if port == 0:
return ''
else:
return port
def coerce_password_from_script(script):
p = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE)
password = p.communicate()[0]
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, script)
# whitespace may be significant in the password, but most files have a
# trailing newline.
return password.strip('\n')
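# Example (editor's addition): any shell command that prints the secret on
# stdout works here, e.g. coerce_password_from_script('echo secret') -> 'secret'.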
HTTP_HOST = Config(
key="http_host",
help=_("HTTP host to bind to."),
type=str,
default="0.0.0.0")
HTTP_PORT = Config(
key="http_port",
help=_("HTTP port to bind to."),
type=int,
default=8888)
HTTP_ALLOWED_METHODS = Config(
key="http_allowed_methods",
help=_("HTTP methods the server will be allowed to service."),
type=coerce_csv,
private=True,
default=['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT'])
SSL_CERTIFICATE = Config(
key="ssl_certificate",
help=_("Filename of SSL Certificate"),
default=None)
SSL_PRIVATE_KEY = Config(
key="ssl_private_key",
help=_("Filename of SSL RSA Private Key"),
default=None)
SSL_CIPHER_LIST = Config(
key="ssl_cipher_list",
help=_("List of allowed and disallowed ciphers"),
default="DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2")
SSL_PASSWORD = Config(
key="ssl_password",
help=_("SSL password of the certificate"),
default=None)
SSL_PASSWORD_SCRIPT = Config(
key="ssl_password_script",
help=_("Execute this script to produce the SSL password. This will be used when `ssl_password` is not set."),
type=coerce_password_from_script,
default=None)
LDAP_PASSWORD = Config(
key="ldap_password",
help=_("LDAP password of the hue user used for LDAP authentications. For example for LDAP Authentication with HiveServer2/Impala."),
private=True,
default=None)
LDAP_PASSWORD_SCRIPT = Config(
key="ldap_password_script",
help=_("Execute this script to produce the LDAP password. This will be used when `ldap_password` is not set."),
private=True,
type=coerce_password_from_script,
default=None)
LDAP_USERNAME = Config(
key="ldap_username",
help=_("LDAP username of the hue user used for LDAP authentications. For example for LDAP Authentication with HiveServer2/Impala."),
private=True,
default="hue")
ENABLE_SERVER = Config(
key="enable_server",
help=_("If set to false, runcpserver will not actually start the web server. Used if Apache is being used as a WSGI container."),
type=coerce_bool,
default=True)
CHERRYPY_SERVER_THREADS = Config(
key="cherrypy_server_threads",
help=_("Number of threads used by the CherryPy web server."),
type=int,
default=40)
SECRET_KEY = Config(
key="secret_key",
help=_("Used in hashing algorithms for sessions."),
default="")
SECRET_KEY_SCRIPT = Config(
key="secret_key_script",
help=_("Execute this script to produce the Django secret key. This will be used when `secret_key` is not set."),
type=coerce_password_from_script,
private=True,
default=None)
USER_ACCESS_HISTORY_SIZE = Config(
key="user_access_history_size",
help=_("Number of user access to remember per view per user."),
type=int,
default=10)
COLLECT_USAGE = Config(
key="collect_usage",
help=_("Help improve Hue with anonymous usage analytics. "
"Use Google Analytics to see how many times an application or specific section of an application is used, nothing more."),
type=coerce_bool,
default=True)
POLL_ENABLED = Config(
key="poll_enabled",
help=_("Use poll(2) in Hue thrift pool."),
type=coerce_bool,
private=True,
default=True
)
MIDDLEWARE = Config(
key="middleware",
help=_("Comma-separated list of Django middleware classes to use. " +
"See https://docs.djangoproject.com/en/1.4/ref/middleware/ for " +
"more details on middlewares in Django."),
type=coerce_csv,
default=[])
REDIRECT_WHITELIST = Config(
key="redirect_whitelist",
help=_("Comma-separated list of regular expressions, which match the redirect URL. "
"For example, to restrict to your local domain and FQDN, the following value can be used:"
" ^\/.*$,^http:\/\/www.mydomain.com\/.*$"),
type=list_of_compiled_res(skip_empty=True),
default='^\/.*$')
SECURE_PROXY_SSL_HEADER = Config(
key="secure_proxy_ssl_header",
help=_("Support for HTTPS termination at the load-balancer level with SECURE_PROXY_SSL_HEADER."),
type=coerce_bool,
default=False)
APP_BLACKLIST = Config(
key='app_blacklist',
default='',
type=coerce_csv,
help=_('Comma separated list of apps to not load at server startup.')
)
DEMO_ENABLED = Config( # Internal and Temporary
key="demo_enabled",
help=_("To set to true in combination when using Hue demo backend."),
type=coerce_bool,
private=True,
default=False)
LOG_REDACTION_FILE = Config(
key="log_redaction_file",
help=_("Use this file to parse and redact log message."),
type=parse_redaction_policy_from_file,
default=None)
ALLOWED_HOSTS = Config(
key='allowed_hosts',
default=['*'],
type=coerce_csv,
help=_('Comma separated list of strings representing the host/domain names that the Hue server can serve.')
)
def is_https_enabled():
return bool(SSL_CERTIFICATE.get() and SSL_PRIVATE_KEY.get())
#
# Email (SMTP) settings
#
_default_from_email = None
def default_from_email():
"""Email for hue@<host-fqdn>"""
global _default_from_email
if _default_from_email is None:
try:
fqdn = socket.getfqdn()
except IOError:
fqdn = 'localhost'
_default_from_email = "hue@" + fqdn
return _default_from_email
def default_database_options():
"""Database type dependent options"""
if DATABASE.ENGINE.get().endswith('oracle'):
return {'threaded': True}
elif DATABASE.ENGINE.get().endswith('sqlite3'):
return {'timeout': 30}
else:
return {}
SMTP = ConfigSection(
key='smtp',
help=_('Configuration options for connecting to an external SMTP server.'),
members=dict(
HOST = Config(
key="host",
help=_("The SMTP server for email notification delivery."),
type=str,
default="localhost"
),
PORT = Config(
key="port",
help=_("The SMTP server port."),
type=int,
default=25
),
USER = Config(
key="user",
help=_("The username for the SMTP host."),
type=str,
default=""
),
PASSWORD = Config(
key="password",
help=_("The password for the SMTP user."),
|
Drachenfels/Game-yolo-archer
|
server/api/outfits.py
|
Python
|
gpl-2.0
| 228
| 0
|
# -*- coding: utf-8 -*-
def outfit():
collection = []
for _ in range(0, 5):
collection.append("Item{}".format(_))
return {
"data": collection,
}
api = [
('/outfit', 'outfit', outfit),
]
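# For reference (editor's addition): outfit() returns
# {"data": ["Item0", "Item1", "Item2", "Item3", "Item4"]}.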
|
Romibuzi/cleo
|
cleo/inputs/list_input.py
|
Python
|
mit
| 4,973
| 0.000804
|
# -*- coding: utf-8 -*-
from .input import Input
class ListInput(Input):
"""
ListInput represents an input provided as an array.
Usage:
>>> input_ = ListInput([('name', 'foo'), ('--bar', 'foobar')])
"""
def __init__(self, parameters, definition=None):
"""
Constructor
@param parameters: A dict of parameters
@type parameters: list
@param definition: An InputDefinition instance
@type definition: InputDefinition
"""
self.interactive = False
self.parameters = parameters
super(ListInput, self).__init__(definition)
def get_first_argument(self):
"""
Returns the first argument from the raw parameters (not parsed)
@return: The value of the first argument or None otherwise
@rtype: str
"""
for item in self.parameters:
if isinstance(item, tuple):
key = item[0]
value = item[1]
else:
key = item
value = None
if key and '-' == key[0]:
continue
return value
def has_parameter_option(self, values):
"""
Returns true if the raw parameters (not parsed) contain a value.
This method is to be used to introspect the input parameters
before they have been validated. It must be used carefully.
@param values: The values to look for in the raw parameters (can be a list)
@type values: str or list
@return: True if the value is contained in the raw parameters
@rtype: bool
"""
if not isinstance(values, list):
values = [values]
for item in self.parameters:
if isinstance(item, tuple):
key = item[0]
else:
key = item
if key in values:
return True
return False
def get_parameter_option(self, values, default=False):
"""
Returns the value of a raw option (not parsed).
This method is to be used to introspect the input parameters
before they have been validated. It must be used carefully.
@param values: The values to look for in the raw parameters (can be a list)
@type values: str or list
@param default: The default value to return if no result is found
@type default: mixed
@return: The option value
@rtype: mixed
"""
if not isinstance(values, list):
values = [values]
for item in self.parameters:
if isinstance(item, tuple):
key = item[0]
value = item[1]
else:
key = item
value = None
if key in values:
return value
return default
def parse(self):
"""
Processes command line arguments.
"""
for item in self.parameters:
if isinstance(item, tuple):
key = item[0]
value = item[1]
else:
key = item
value = None
if key.startswith('--'):
self.add_long_option(key[2:], value)
elif key[0] == '-':
self.add_short_option(key[1:], value)
else:
self.add_argument(key, value)
def add_short_option(self, shortcut, value):
"""
Adds a short option value
@param shortcut: The short option key
@type shortcut: str
@param value: The value for the option
@type value: mixed
"""
if not self.definition.has_shortcut(shortcut):
raise Exception('The "-%s" option does not exist.' % shortcut)
self.add_long_option(self.definition.get_option_for_shortcut(shortcut).get_name(), value)
def add_long_option(self, name, value):
"""
Adds a long option value
@param name: The long option key
@type name: str
@param value: The value for the option
@type value: mixed
"""
if not self.definition.has_option(name):
raise Exception('The "--%s" option does not exist.' % name)
option = self.definition.get_option(name)
if value is None:
if option.is_value_required():
raise Exception('The "--%s" option requires a value.' % name)
value = option.get_default() if option.is_value_optional() else True
self.options[name] = value
def add_argument(self, name, value):
"""
Adds an argument value
@param name: The argument key
@type name: str
@param value: The value for the argument
@type value: mixed
"""
if not self.definition.has_argument(name):
raise Exception('The "%s" argument does not exist.' % name)
self.arguments[name] = value
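# Editor's sketch (mirrors the class docstring, not part of the original file):
# the raw-parameter helpers can be used before parsing/validation.
#   input_ = ListInput([('name', 'foo'), ('--bar', 'foobar')])
#   input_.get_first_argument()           # -> 'foo'
#   input_.has_parameter_option('--bar')  # -> True
#   input_.get_parameter_option('--bar')  # -> 'foobar'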
|
rodekruis/shelter-database
|
src/web/views/session_mgmt.py
|
Python
|
mit
| 5,400
| 0.003704
|
#! /usr/bin/env python
#-*- coding: utf-8 -*-
# ***** BEGIN LICENSE BLOCK *****
# This file is part of Shelter Database.
# Copyright (c) 2016 Luxembourg Institute of Science and Technology.
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.2 $"
__date__ = "$Date: 2016/05/31$"
__revision__ = "$Date: 2016/06/11 $"
__copyright__ = "Copyright 2016 Luxembourg Institute of Science and Technology"
__license__ = ""
import logging
import datetime
from werkzeug import generate_password_hash
from flask import (render_template, flash, session, request,
url_for, redirect, current_app, g)
from flask_login import LoginManager, logout_user, \
login_required, current_user
from flask_principal import (Principal, AnonymousIdentity, UserNeed,
identity_changed, identity_loaded,
session_identity_loader)
import conf
from bootstrap import db
from web.views.common import admin_role, login_user_bundle
from web.models import User
from web.forms import LoginForm #, SignupForm
from web.lib.utils import HumanitarianId
#from notifications import notifications
Principal(current_app)
# Create a permission with a single Need, in this case a RoleNeed.
login_manager = LoginManager()
login_manager.init_app(current_app)
login_manager.login_message = u"Please log in to access this page."
login_manager.login_message_category = "warning"
login_manager.login_view = 'login'
logger = logging.getLogger(__name__)
@identity_loaded.connect_via(current_app._get_current_object())
def on_identity_loaded(sender, identity):
# Set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if current_user.is_authenticated:
identity.provides.add(UserNeed(current_user.id))
if current_user.is_admin:
identity.provides.add(admin_role)
@login_manager.user_loader
def load_user(user_id):
return User.query.filter(User.id==user_id, User.is_active==True).first()
@current_app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.datetime.now()
db.session.commit()
@current_app.route('/login', methods=['GET'])
def join():
if current_user.is_authenticated or HumanitarianId().login():
return redirect(url_for('index'))
form = LoginForm()
#signup = SignupForm()
return render_template(
'login.html',
humanitarian_id_auth_uri=conf.HUMANITARIAN_ID_AUTH_URI,
client_id=conf.HUMANITARIAN_ID_CLIENT_ID,
redirect_uri=conf.HUMANITARIAN_ID_REDIRECT_URI,
loginForm=form #, signupForm=signup
)
@current_app.route('/login', methods=['POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
flash('You are logged in', 'info')
login_user_bundle(form.user)
return form.redirect('index')
#signup = SignupForm()
return render_template(
'login.html',
humanitarian_id_auth_uri=conf.HUMANITARIAN_ID_AUTH_URI,
client_id=conf.HUMANITARIAN_ID_CLIENT_ID,
redirect_uri=conf.HUMANITARIAN_ID_REDIRECT_URI,
loginForm=form #, signupForm=signup
)
@current_app.route('/callback/humanitarianid', methods=['GET'])
def login_humanitarianid():
if current_user.is_authenticated:
return redirect(url_for('index'))
access_token = request.values.get('access_token', None)
if access_token:
session['hid_access_token'] = access_token
return redirect(url_for('join'))
return render_template('humanitarianid_login.html')
@current_app.route('/logout')
@login_required
def logout():
# Remove the user information from the session
logout_user()
flash('You are logged out', 'warning')
# Remove session keys set by Flask-Principal
for key in ('identity.name', 'identity.auth_type', 'hid_access_token'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app, identity=AnonymousIdentity())
session_identity_loader()
if request.values.get('hid_logout'):
return redirect(conf.HUMANITARIAN_ID_AUTH_URI+'/logout')
return redirect(url_for('index'))
#@current_app.route('/signup', methods=['POST'])
#def signup():
# """if not conf.SELF_REGISTRATION:
# flash("Self-registration is disabled.", 'warning')
# return redirect(url_for('index'))"""
# if current_user.is_authenticated:
# return redirect(url_for('index'))#
#
# form = SignupForm()
# if form.validate_on_submit():
# user = User(name=form.name.data,
# email=form.email.data,
# pwdhash=generate_password_hash(form.password.data),
# is_active=True)
# db.session.add(user)
# db.session.commit()
# flash('Your account has been created. ', 'success')
# login_user_bundle(user) # automatically log the user
#
# return form.redirect('index')
#
# loginForm = LoginForm()
# return render_template(
# 'join.html',
# loginForm=loginForm, signupForm=form
# )
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/tione/v20191022/models.py
|
Python
|
mit
| 91,250
| 0.002541
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AlgorithmSpecification(AbstractModel):
"""Algorithm configuration
"""
def __init__(self):
r"""
:param TrainingImageName: Image name
Note: this field may return null, indicating that no valid value could be obtained.
:type TrainingImageName: str
:param TrainingInputMode: Input mode, File|Pipe
Note: this field may return null, indicating that no valid value could be obtained.
:type TrainingInputMode: str
:param AlgorithmName: Algorithm name
Note: this field may return null, indicating that no valid value could be obtained.
:type AlgorithmName: str
"""
self.TrainingImageName = None
self.TrainingInputMode = None
self.AlgorithmName = None
def _deserialize(self, params):
self.TrainingImageName = params.get("TrainingImageName")
self.TrainingInputMode = params.get("TrainingInputMode")
self.AlgorithmName = params.get("AlgorithmName")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
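# Editor's sketch (not part of the original SDK): _deserialize consumes a plain
# dict and warns about keys it does not recognise; the values are placeholders.
#   spec = AlgorithmSpecification()
#   spec._deserialize({"TrainingImageName": "my-image",
#                      "TrainingInputMode": "File",
#                      "AlgorithmName": "xgboost"})
#   spec.TrainingImageName  # -> 'my-image'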
class BillingLabel(AbstractModel):
"""Billing label
"""
def __init__(self):
r"""
:param Label: Billing item identifier
Note: this field may return null, indicating that no valid value could be obtained.
:type Label: str
:param VolumeSize: Storage size
:type VolumeSize: int
:param Status: Billing status
None: not billed
StorageOnly: only storage is billed
Computing: both computing and storage are billed
:type Status: str
"""
self.Label = None
self.VolumeSize = None
self.Status = None
def _deserialize(self, params):
self.Label = params.get("Label")
self.VolumeSize = params.get("VolumeSize")
self.Status = params.get("Status")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class ClsConfig(AbstractModel):
"""Configuration for accessing the CLS service
"""
def __init__(self):
r"""
:param Type: Access type; the options are free and customer
:type Type: str
:param LogSetId: Logset ID of the custom CLS; only effective when Type is customer
:type LogSetId: str
:param TopicId: Log topic ID of the custom CLS; only effective when Type is customer
:type TopicId: str
"""
self.Type = None
self.LogSetId = None
self.TopicId = None
def _deserialize(self, params):
self.Type = params.get("Type")
self.LogSetId = params.get("LogSetId")
self.TopicId = params.get("TopicId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CodeRepoSummary(AbstractModel):
"""Code repository list
"""
def __init__(self):
r"""
:param CreationTime: Creation time
:type CreationTime: str
:param LastModifiedTime: Last modified time
:type LastModifiedTime: str
:param CodeRepositoryName: Code repository name
:type CodeRepositoryName: str
:param GitConfig: Git configuration
:type GitConfig: :class:`tencentcloud.tione.v20191022.models.GitConfig`
:param NoSecret: Whether a Git credential is set
:type NoSecret: bool
"""
self.CreationTime = None
self.LastModifiedTime = None
self.CodeRepositoryName = None
self.GitConfig = None
self.NoSecret = None
def _deserialize(self, params):
self.CreationTime = params.get("CreationTime")
self.LastModifiedTime = params.get("LastModifiedTime")
self.CodeRepositoryName = params.get("CodeRepositoryName")
if params.get("GitConfig") is not None:
self.GitConfig = GitConfig()
self.GitConfig._deserialize(params.get("GitConfig"))
self.NoSecret = params.get("NoSecret")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CosDataSource(AbstractModel):
    """COS path
    """
    def __init__(self):
        r"""
        :param Bucket: COS bucket
        Note: this field may return null, indicating that no valid value was obtained.
        :type Bucket: str
        :param KeyPrefix: COS file key
        Note: this field may return null, indicating that no valid value was obtained.
        :type KeyPrefix: str
        :param DataDistributionType: Distributed data download mode
        Note: this field may return null, indicating that no valid value was obtained.
        :type DataDistributionType: str
        :param DataType: Data type
        Note: this field may return null, indicating that no valid value was obtained.
        :type DataType: str
        """
self.Bucket = None
        self.KeyPrefix = None
        self.DataDistributionType = None
        self.DataType = None
    def _deserialize(self, params):
self.Bucket = params.get("Bucket")
self.KeyPrefix = params.get("KeyPrefix")
self.DataDistributionType = params.get("DataDistributionType")
self.DataType = params.get("DataType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateCodeRepositoryRequest(AbstractModel):
    """CreateCodeRepository request structure
    """
    def __init__(self):
        r"""
        :param CodeRepositoryName: Code repository name
        :type CodeRepositoryName: str
        :param GitConfig: Git-related configuration
        :type GitConfig: :class:`tencentcloud.tione.v20191022.models.GitConfig`
        :param GitSecret: Git credential
        :type GitSecret: :class:`tencentcloud.tione.v20191022.models.GitSecret`
        """
self.CodeRepositoryName = None
self.GitConfig = None
self.GitSecret = None
def _deserialize(self, params):
self.CodeRepositoryName = params.get("CodeRepositoryName")
if params.get("GitConfig") is not None:
self.GitConfig = GitConfig()
self.GitConfig._deserialize(params.get("GitConfig"))
if params.get("GitSecret") is not None:
self.GitSecret = GitSecret()
self.GitSecret._deserialize(params.get("GitSecret"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateCodeRepositoryResponse(AbstractModel):
    """CreateCodeRepository response structure
    """
    def __init__(self):
        r"""
        :param CodeRepositoryName: Code repository name
        :type CodeRepositoryName: str
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting an issue.
        :type RequestId: str
        """
self.CodeRepositoryName = None
self.RequestId = None
def _deserialize(self, params):
self.CodeRepositoryName = params.get("CodeRepositoryName")
self.RequestId = params.get("RequestId")
class CreateNotebookInstanceRequest(AbstractModel):
    """CreateNotebookInstance request structure
    """
    def __init__(self):
        r"""
        :param NotebookInstanceName: Notebook instance name, no more than 63 characters
        Rule: "^\[a-zA-Z0-9\](-\*\[a-zA-Z0-9\])\*$"
        :type NotebookInstanceName: str
        :param InstanceType: Notebook compute type
参
marc-sensenich/ansible | lib/ansible/modules/network/fortios/fortios_webfilter_urlfilter.py | Python | gpl-3.0 | 12,762 | 0.001254
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The lib uses python logging; you can get its output if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_urlfilter
short_description: Configure URL filter lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure webfilter feature and urlfilter category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
webfilter_urlfilter:
description:
- Configure URL filter lists.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Optional comments.
entries:
description:
- URL filter entries.
suboptions:
action:
description:
- Action to take for URL filter matches.
choices:
- exempt
- block
- allow
- monitor
dns-address-family:
description:
- Resolve IPv4 address, IPv6 address, or both from DNS server.
choices:
- ipv4
- ipv6
- both
exempt:
description:
- If action is set to exempt, select the security profile operations that exempt URLs skip. Separate multiple options with a space.
choices:
- av
- web-content
- activex-java-cookie
- dlp
- fortiguard
- range-block
- pass
- all
id:
description:
- Id.
required: true
referrer-host:
description:
- Referrer host name.
status:
description:
- Enable/disable this URL filter.
choices:
- enable
- disable
type:
description:
- Filter type (simple, regex, or wildcard).
choices:
- simple
- regex
- wildcard
url:
description:
- URL to be filtered.
web-proxy-profile:
description:
- Web proxy profile. Source web-proxy.profile.name.
id:
description:
- ID.
required: true
ip-addr-block:
description:
- Enable/disable blocking URLs when the hostname appears as an IP address.
choices:
- enable
- disable
name:
description:
- Name of URL filter list.
one-arm-ips-urlfilter:
description:
- Enable/disable DNS resolver for one-arm IPS URL filter operation.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure URL filter lists.
fortios_webfilter_urlfilter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
webfilter_urlfilter:
state: "present"
comment: "Optional comments."
entries:
-
action: "exempt"
dns-address-family: "ipv4"
exempt: "av"
id: "8"
referrer-host: "myhostname"
status: "enable"
type: "simple"
url: "myurl.com"
web-proxy-profile: "<your_own_value> (source web-proxy.profile.name)"
id: "14"
ip-addr-block: "enable"
name: "default_name_16"
one-arm-ips-urlfilter: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
  description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug(
notapresent/rbm2m | rbm2m/action/stats.py | Python | apache-2.0 | 2,721 | 0
# -*- coding: utf-8 -*-
from sqlalchemy import func, distinct
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import literal
from rbm2m.models import Record, Image, Scan, Genre, scan_records
def get_overview(sess):
"""
Returns aggregated statistics about records, scans, genres etc
"""
last_scan_ids = [scan.id for scan in last_scans(sess)]
if not last_scan_ids: # Append dummy ID to avoid SA warning
last_scan_ids = [-1] # about IN-predicate with an empty sequence
rec_instock = (
sess.query(func.count(distinct(scan_records.c.record_id)))
.filter(scan_records.c.scan_id.in_(last_scan_ids))
.as_scalar()
)
img_total = sess.query(func.count(Image.id)).as_scalar()
img_total_size = sess.query(func.sum(Image.length)).as_scalar()
lots = (
sess.query(func.count(scan_records.c.record_id))
.filter(scan_records.c.scan_id.in_(last_scan_ids))
.join(Scan)
.join(Genre)
.filter(Genre.export_enabled.is_(True))
.as_scalar()
)
row = sess.query(
func.count(Record.id).label('records_total'),
rec_instock.label('records_in_stock'),
img_total.label('images_total'),
img_total_size.label('images_total_length'),
lots.label('lots')
).one()
result = dict(zip(row.keys(), row))
result['images_total_length'] = int(result['images_total_length'] or 0)
return result
def last_scans(sess):
"""
Returns list of last successful scans, one for each genre
"""
s1 = aliased(Scan)
rows = (
sess.query(Scan)
.join(Genre, Scan.genre_id == Genre.id)
.filter(Scan.id == sess.query(s1.id)
.filter(s1.genre_id == Genre.id)
.filter(s1.status == 'success')
.order_by(s1.started_at.desc())
.limit(1)
.as_scalar()
)
.all()
)
return rows
def active_scans(sess):
"""
    Returns list of scans currently in progress, along with
    current record count for each scan
"""
rec_count = (
sess.query(func.count(scan_records.c.record_id))
        .filter(scan_records.c.scan_id == Scan.id)
.correlate(Scan)
.as_scalar()
)
rows = (
sess.query(Scan.id, Scan.started_at, Scan.est_num_records,
rec_count.label('num_records'), Genre.title)
.join(Genre, Genre.id == Scan.genre_id)
.filter(Scan.status == 'running')
.order_by(Scan.started_at)
.all()
)
scans = [dict(zip(r.keys(), r)) for r in rows]
return scans
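# Illustrative usage sketch (added for clarity, not part of the original module): both
# helpers only need a SQLAlchemy session. The engine URL below is a hypothetical
# example; the real project configures its own session factory.
def _example_stats_usage():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite:///rbm2m_example.db')  # hypothetical DSN
    sess = sessionmaker(bind=engine)()
    overview = get_overview(sess)   # aggregated record/image/lot counts
    running = active_scans(sess)    # scans currently in progress
    return overview, running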
evernym/zeno | plenum/test/consensus/order_service/test_can_send_3pc.py | Python | apache-2.0 | 5,936 | 0.002695
import pytest
from plenum.common.startable import Mode
def test_can_send_3pc_batch_by_primary_only(primary_orderer):
assert primary_orderer.can_send_3pc_batch()
primary_orderer._data.primary_name = "SomeNode:0"
assert not primary_orderer.can_send_3pc_batch()
def test_can_send_3pc_batch_not_participating(primary_orderer, mode):
primary_orderer._data.node_mode = mode
result = primary_orderer.can_send_3pc_batch()
assert result == (mode == Mode.participating)
def test_can_send_3pc_batch_old_view(primary_orderer, mode):
primary_orderer.last_ordered_3pc = (primary_orderer.view_no + 1, 0)
primary_orderer._data.node_mode = mode
assert not primary_orderer.can_send_3pc_batch()
def test_can_send_3pc_batch_old_pp_seq_no_for_view(primary_orderer, mode):
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, 100)
primary_orderer._lastPrePrepareSeqNo = 0
primary_orderer._data.node_mode = mode
assert not primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [1, 3, 8, 13])
def test_can_send_multiple_3pc_batches(primary_orderer, initial_seq_no, monkeypatch):
monkeypatch.setattr(primary_orderer._config, 'Max3PCBatchesInFlight', None)
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, initial_seq_no)
primary_orderer._lastPrePrepareSeqNo = initial_seq_no + 10
assert primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [1, 3, 8, 13])
@pytest.mark.parametrize('num_in_flight', [0, 1, 2, 3])
def test_can_send_multiple_3pc_batches_below_limit(primary_orderer, initial_seq_no, num_in_flight, monkeypatch):
limit = 4
monkeypatch.setattr(primary_orderer._config, 'Max3PCBatchesInFlight', limit)
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, initial_seq_no)
primary_orderer._lastPrePrepareSeqNo = initial_seq_no + num_in_flight
assert primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [1, 3, 8, 13])
@pytest.mark.parametrize('above_limit', [0, 1, 2, 5, 10])
def test_cannot_send_multiple_3pc_batches_above_limit(primary_orderer, initial_seq_no, above_limit, monkeypatch):
limit = 4
monkeypatch.setattr(primary_orderer._config, 'Max3PCBatchesInFlight', limit)
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, initial_seq_no)
primary_orderer._lastPrePrepareSeqNo = initial_seq_no + limit + above_limit
assert not primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('initial_seq_no', [1, 3, 8, 13])
@pytest.mark.parametrize('num_in_flight', [0, 1, 2, 3, 4, 5, 10])
def test_can_send_multiple_3pc_batches_in_next_view(primary_orderer, initial_seq_no, num_in_flight, monkeypatch):
limit = 4
monkeypatch.setattr(primary_orderer._config, 'Max3PCBatchesInFlight', limit)
primary_orderer.last_ordered_3pc = (primary_orderer.view_no - 1, initial_seq_no)
primary_orderer._lastPrePrepareSeqNo = initial_seq_no + num_in_flight
assert primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('last_pp_seqno', [0, 1, 9])
def test_cannot_send_3pc_batch_below_prev_view_prep_cert(primary_orderer, last_pp_seqno):
primary_orderer._data.prev_view_prepare_cert = 10
primary_orderer._lastPrePrepareSeqNo = last_pp_seqno
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, last_pp_seqno)
assert not primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('last_pp_seqno', [0, 9, 10])
def test_can_send_3pc_batch_None_prev_view_prep_cert(primary_orderer, last_pp_seqno):
primary_orderer._data.prev_view_prepare_cert = 0
primary_orderer._lastPrePrepareSeqNo = last_pp_seqno
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, last_pp_seqno)
assert primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('last_pp_seqno', [10, 11, 100])
def test_can_send_3pc_batch_above_prev_view_prep_cert(primary_orderer, last_pp_seqno):
primary_orderer._data.prev_view_prepare_cert = 10
primary_orderer._lastPrePrepareSeqNo = last_pp_seqno
primary_orderer.last_ordered_3pc = (primary_orderer.view_no, last_pp_seqno)
assert primary_orderer.can_send_3pc_batch()
@pytest.mark.parametrize('last_ordered_3pc, can_send',
[
((0, 0), False),
((0, 1), False),
((0, 9), False),
((0, 10), False),
((0, 11), True),
((0, 12), True),
((0, 13), True)
])
def test_can_not_send_3pc_until_first_batch_in_non_zero_view_ordered(primary_orderer, last_ordered_3pc, can_send):
primary_orderer._data.view_no = 1
primary_orderer._data.prev_view_prepare_cert = 10
primary_orderer._lastPrePrepareSeqNo = max(11, last_ordered_3pc[1])
primary_orderer._data.last_ordered_3pc = last_ordered_3pc
assert primary_orderer.can_send_3pc_batch() == can_send
@pytest.mark.parametrize('last_ordered_3pc, can_send',
[
((0, 0), True),
((0, 1), True),
((0, 9), True),
((0, 10), True),
((0, 11), True),
((0, 12), True),
((0, 13), True)
])
def test_can_send_3pc_before_first_batch_in_zero_view_ordered(primary_orderer, last_ordered_3pc, can_send, monkeypatch):
monkeypatch.setattr(primary_orderer._config, 'Max3PCBatchesInFlight', 20)
primary_orderer._data.view_no = 0
primary_orderer._data.prev_view_prepare_cert = 10
primary_orderer._lastPrePrepareSeqNo = max(11, last_ordered_3pc[1])
primary_orderer._data.last_ordered_3pc = last_ordered_3pc
assert primary_orderer.can_send_3pc_batch() == can_send
warisb/derpbox | DerpBox/file_utils.py | Python | mit | 1,062 | 0
#!/usr/bin/env python
"""file_utils.py: convenient file operations used by derpbox"""
__author__ = "Waris Boonyasiriwat"
__copyright__ = "Copyright 2017"
import os
import hashlib
def md5(filename):
hash_md5 = hashlib.md5()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
def create_file_obj(id, root_path, path):
file_obj = {
'id': id,
'path': path,
'isDirectory': os.path.isdir(root_path + path),
}
if not file_obj['isDirectory']:
file_obj['hash'] = md5(root_path + path)
return file_obj
def get_paths_recursive(root_path):
paths = []
for root, dirs, files in os.walk(root_path):
for f in files:
path = os.path.relpath(os.path.join(root, f), root_path)
paths.append(path.replace('\\', '/'))
for d in dirs:
path = os.path.relpath(os.path.join(root, d), root_path)
paths.append(path.replace('\\', '/'))
return paths
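# Illustrative usage sketch (added for clarity, not part of the original module): build
# the file objects derpbox exchanges for everything under a sync root. The directory
# name is an assumption for the example only; note that root_path should end with '/'.
def example_file_listing(root_path="./derpbox_root/"):
    paths = get_paths_recursive(root_path)
    return [create_file_obj(i, root_path, p) for i, p in enumerate(paths)]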
Colviz/Vince | groups/group_server.py | Python | apache-2.0 | 2,451 | 0.020808
#!/usr/bin/python
from subprocess import call
import sys
import os
from socket import *
cs = socket(AF_INET, SOCK_DGRAM)
cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
cs.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
###Broadcast according to client group
#Show ports associated with a particular group
file = "group_port.txt" #Name of file containing Groups
a = open(file,'r')
file_contents = a.read()
print(file_contents)
a.close()
#Taking port as input
print("Enter the port of the associated Group: ")
port = int(input())
###Assigning the port and broadcasting address
#Note - Change Port no. according to the group no.
#port = 9999 #Default port
addr = ('255.255.255.255',port) #Address used for broadcasting
###Setting the buffer size
buf =1024 #Buffer Size
file_name=sys.argv[1] #Taking file name from command line argument [0]-program_file name, [1]- input provided
#[2] - multicast (using for broadcasting), [3] - file with list of IP's,on which to broadcast
###Writing server's IP to file
#Taking the ip as input from server_ip file - just for reference
fp = open("server_ip","r")
ip = fp.read()
fp.close()
written = 0
ipp = ip
#Checking if IP already exists
fl = open(file_name,'r')
lines = fl.readlines()
for line in lines:
if line == ipp:
written = 1
fl.close()
#If not written then write IP to file
if written !=1:
file = open(file_name,"a")
file.write(ip)
file.close()
#Writing IP ends here
#Encrypting the file with GPG key
call(["gpg", "-r", "trialuser@mailinator.com", "-e", file_name])
file_name = file_name+".gpg" #New file name
###Putting the file's content in buffer
f=open(file_name,"rb") #Opening file in read mode
data = f.read(buf) #Taking the data from file into data variable
###Sending the data
print("##################################################")
print("# Sending File to the selected group #")
print("##################################################\n")
print("##################################################")
print("# File sent to the group #")
print("##################################################")
os.remove(file_name) #Delete the intermediate (encrypted file)
cs.sendto(data,addr) #Sending data to the broadcasting address
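#Illustrative receiver sketch (added for clarity, not part of the original script).
#A client on the same subnet can pick up the broadcast like this; the port and buffer
#size must match the server, and the payload is still GPG-encrypted on arrival.
def example_receiver(listen_port, bufsize=1024):
    rs = socket(AF_INET, SOCK_DGRAM)
    rs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    rs.bind(('', listen_port))
    data, sender = rs.recvfrom(bufsize)
    with open("received_file.gpg", "wb") as out:
        out.write(data)
    rs.close()
    return sender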
neversun/sailfish-hackernews | pyPackages/python_firebase-noarch/firebase/firebase.py | Python | mit | 16,320 | 0.001287
try:
import urlparse
except ImportError:
#py3k
from urllib import parse as urlparse
import json
from .firebase_token_generator import FirebaseTokenGenerator
from .decorators import http_connection
from .multiprocess_pool import process_pool
from .jsonutil import JSONEncoder
__all__ = ['FirebaseAuthentication', 'FirebaseApplication']
@http_connection(60)
def make_get_request(url, params, headers, connection):
"""
Helper function that makes an HTTP GET request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
    connection = connection_pool.get_available_connection()
    response = make_get_request('http://firebase.localhost/users', {'print': 'silent'},
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'John Doe', '2': 'Jane Doe'}
"""
timeout = getattr(connection, 'timeout')
response = connection.get(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_put_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PUT request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users',
'{"1": "Ozgur Vatansever"}',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.put(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_post_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP POST request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
    `params`: Python dict that is appended to the URL like a querystring.
    `headers`: Python dict. HTTP request headers.
    `connection`: Predefined HTTP connection instance. If not given, it
    is supplied by the `decorators.http_connection` function.
    The returning value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_post_request('http://firebase.localhost/users/',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {u'name': u'-Inw6zol_2f5ThHwVcSe'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.post(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_patch_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PATCH request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_patch_request('http://firebase.localhost/users/1',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.patch(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_delete_request(url, params, headers, connection):
"""
Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The returning value is NULL. However, if the status code is not 2xx or 403,
    a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_delete_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => NULL or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.delete(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
class FirebaseUser(object):
"""
Class that wraps the credentials of the authenticated user. Think of
this as a container that holds authentication related data.
"""
def __init__(self, email, firebase_auth_token, provider, id=None):
self.email = email
self.firebase_auth_token = firebase_auth_token
self.provider = provider
self.id = id
class FirebaseAuthentication(object):
"""
Class that wraps the Firebase SimpleLogin mechanism. Actually this
class does not trigger a connection, simply fakes the auth action.
In addition, the provided email and password information is totally
useless and they never appear in the ``auth`` variable at the server.
"""
def __init__(self, secret, email, debug=False, admin=False, extra=None):
self.authenticator = FirebaseTokenGenerator(secret, debug, admin)
self.email = email
self.provider = 'password'
self.extra = (extra or {}).copy()
self.extra.update({'debug': debug, 'admin': admin,
'email': self.email, 'provider': self.provider})
def get_user(self):
"
khchine5/xl | lino_xl/lib/contacts/choicelists.py | Python | bsd-2-clause | 272 | 0.003676
# -*- coding: UTF-8 -*-
# Copyright 2016 Luc Saffre
# License: BSD (see file COPYING for details)
from lino.api import dd, _
class PartnerEvents(dd.ChoiceList):
verbose_name = _("Observed event")
verbose_name_plural = _("Observed events")
max_length = 50
Lilykos/invenio | invenio/celery/tasks.py | Python | gpl-2.0 | 1,196 | 0
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
from invenio.celery import celery
@celery.task
def invenio_version():
""" Task that will return the current running Invenio version """
from invenio.base.globals import cfg
return cfg['CFG_VERSION']
@celery.task
def invenio_db_test(num):
""" Task will execute a simple query in the database"""
from invenio.ext.sqlalchemy import db
return db.engine.execute("select %s" % int(num)).scalar()
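# Illustrative usage sketch (added for clarity, not part of the original module): the
# tasks above are normally sent to a worker through the standard Celery API. A running
# broker and worker are assumed.
def example_run_tasks():
    version = invenio_version.delay().get(timeout=10)
    answer = invenio_db_test.delay(42).get(timeout=10)
    return version, answer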
laginha/django-easy-response | src/easy_response/decorators.py | Python | mit | 366 | 0.005464
from .utils.process import to_http
from .consts import BASIC_SERIALIZATION
def serialization(basic=BASIC_SERIALIZATION):
def decorator(view):
def wrapper(request, *args, **kwargs):
            response = view(request, *args, **kwargs)
            return to_http(request, response, basic_serialization=basic)
return wrapper
return decorator
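# Illustrative usage sketch (added for clarity, not part of the original module): the
# decorator wraps an ordinary Django view and passes its return value to to_http for
# serialization. The view below is hypothetical, and what it may return depends on
# what to_http accepts, which is not shown in this file.
@serialization()
def example_view(request):
    return {'status': 'ok'}  # hypothetical payload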
commaai/openpilot | selfdrive/car/interfaces.py | Python | mit | 9,765 | 0.009421
import os
import time
from abc import abstractmethod, ABC
from typing import Dict, Tuple, List
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS
ACCEL_MAX = 2.0
ACCEL_MIN = -3.5
# generic car and radar interfaces
class CarInterfaceBase(ABC):
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.steering_unpressed = 0
self.low_speed_alert = False
self.silent_steer_warning = True
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.cp_loopback = self.CS.get_loopback_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
@staticmethod
def get_pid_accel_limits(CP, current_speed, cruise_speed):
return ACCEL_MIN, ACCEL_MAX
@staticmethod
@abstractmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
pass
@staticmethod
def init(CP, logcan, sendcan):
pass
@staticmethod
def get_steer_feedforward_default(desired_angle, v_ego):
# Proportional to realigning tire momentum: lateral acceleration.
# TODO: something with lateralPlan.curvatureRates
return desired_angle * (v_ego**2)
@classmethod
def get_steer_feedforward_function(cls):
return cls.get_steer_feedforward_default
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.unsafeMode = 0 # see panda/board/safety_declarations.h for allowed values
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
ret.wheelSpeedFactor = 1.0
ret.pcmCruise = True # openpilot's state is tied to the PCM's cruise state on most cars
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
    ret.steerRatioRear = 0.  # no rear steering, at least on the listed cars above
ret.openpilotLongitudinalControl = False
ret.stopAccel = -2.0
ret.stoppingDecelRate = 0.8 # brake_travel/s while trying to stop
ret.vEgoStopping = 0.5
ret.vEgoStarting = 0.5
ret.stoppingControl = True
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kf = 1.
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
# TODO estimate car specific lag, use .15s for now
ret.longitudinalActuatorDelayLowerBound = 0.15
ret.longitudinalActuatorDelayUpperBound = 0.15
ret.steerLimitTimer = 1.0
return ret
@abstractmethod
def update(self, c: car.CarControl, can_strings: List[bytes]) -> car.CarState:
pass
@abstractmethod
  def apply(self, c: car.CarControl) -> Tuple[car.CarControl.Actuators, List[bytes]]:
pass
def create_common_events(self, cs_out, extra_gears=None, pcm_enable=True):
events = Events()
if cs_out.doorOpen:
events.add(EventName.doorOpen)
if cs_out.seatbeltUnlatched:
events.add(EventName.seatbeltNotLatched)
    if cs_out.gearShifter != GearShifter.drive and (extra_gears is None or
cs_out.gearShifter not in extra_gears):
events.add(EventName.wrongGear)
if cs_out.gearShifter == GearShifter.reverse:
events.add(EventName.reverseGear)
if not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
if cs_out.gasPressed:
events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > MAX_CTRL_SPEED:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
if cs_out.brakeHoldActive and self.CP.openpilotLongitudinalControl:
events.add(EventName.brakeHold)
# Handle permanent and temporary steering faults
self.steering_unpressed = 0 if cs_out.steeringPressed else self.steering_unpressed + 1
if cs_out.steerFaultTemporary:
# if the user overrode recently, show a less harsh alert
if self.silent_steer_warning or cs_out.standstill or self.steering_unpressed < int(1.5 / DT_CTRL):
self.silent_steer_warning = True
events.add(EventName.steerTempUnavailableSilent)
else:
events.add(EventName.steerTempUnavailable)
else:
self.silent_steer_warning = False
if cs_out.steerFaultPermanent:
events.add(EventName.steerUnavailable)
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
if (cs_out.gasPressed and not self.CS.out.gasPressed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase(ABC):
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase(ABC):
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
self.left_blinker_prev = False
self.right_blinker_prev = False
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def get_wheel_speeds(self, fl, fr, rl, rr, unit=CV.KPH_TO_MS):
factor = unit * self.CP.wheelSpeedFactor
wheelSpeeds = car.CarState.WheelSpeeds.new_message()
wheelSpeeds.fl = fl * factor
wheelSpeeds.fr = fr * factor
wheelSpeeds.rl = rl * factor
wheelSpeeds.rr = rr * factor
return wheelSpeeds
def update_blinker_from_lamp(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
"""Update blinkers from lights. Enable output when light was seen within the last `blinker_time`
iterations"""
# TODO: Handle case when switching direction. Now both blinkers can be on at the same time
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
def update_blinker_from_stalk(self, blinker_time: int, left_blinker_stalk: bool, right_bl
tatsy/hydra | hydra/tonemap/durand.py | Python | mit | 1,367 | 0.008047
"""
Implementation of the paper,
Durand and Dorsey SIGGRAPH 2002,
"Fast Bilateral Filtering for the display of high-dynamic range images"
"""
import numpy as np
import hydra.io
import hydra.filters
def bilateral_separation(img, sigma_s=0.02, sigma_r=0.4):
r, c = img.shape
sigma_s = max(r, c) * sigma_s
img_log = np.log10(img + 1.0e-6)
img_fil = hydra.filters.bilateral(img_log, sigma_s, sigma_r)
base = 10.0 ** (img_fil) - 1.0e-6
base[base <= 0.0] = 0.0
base = base.reshape((r, c))
detail = hydra.core.remove_specials(img / base)
return base, detail
def durand(img, target_contrast=5.0):
L = hydra.core.lum(img)
tmp = np.zeros(img.shape)
for c in range(3):
tmp[:,:,c] = hydra.core.remove_specials(img[:,:,c] / L)
Lbase, Ldetail = bilateral_separation(L)
log_base = np.log10(Lbase)
max_log_base = np.max(log_base)
log_detail = np.log10(Ldetail)
compression_factor = np.log(target_contrast) / (max_log_base - np.min(log_base))
log_absolute = compression_factor * max_log_base
log_compressed = log_base * compression_factor + log_detail - log_absolute
output = np.power(10.0, log_compressed)
ret = np.zeros(img.shape)
    for c in range(3):
ret[:,:,c] = tmp[:,:,c] * output
ret = np.maximum(ret, 0.0)
ret = np.minimum(ret, 1.0)
return ret
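# Illustrative usage sketch (added for clarity, not part of the original module): the
# operator expects a positive HDR image of shape (rows, cols, 3). Random data is used
# here purely to exercise the call; a real pipeline would load an HDR image instead.
def _example_durand():
    hdr = np.random.uniform(low=1e-3, high=100.0, size=(64, 64, 3))
    return durand(hdr, target_contrast=5.0)  # tone-mapped values clipped to [0, 1]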
Fierydemise/ShadowCraft-Engine | tests/objects_tests/race_tests.py | Python | lgpl-3.0 | 2,806 | 0.003207
import unittest
from shadowcraft.objects import race
class TestRace(unittest.TestCase):
def setUp(self):
self.race = race.Race('human')
def test__init__(self):
self.assertEqual(self.race.race_name, 'human')
self.assertEqual(self.race.character_class, 'rogue')
def test_set_racials(self):
self.assertTrue(self.race.human_spirit)
self.assertFalse(self.race.blood_fury_physical)
def test_exceptions(self):
self.assertRaises(race.InvalidRaceException, self.race.__setattr__, 'level', 111)
self.assertRaises(race.InvalidRaceException, self.race.__init__, 'murloc')
self.assertRaises(race.InvalidRaceException, self.race.__init__, 'undead', 'demon_hunter')
def test__getattr__(self):
racial_stats = (288, 306, 212, 169, 127)
for i, stat in enumerate(['racial_str', 'racial_agi', 'racial_sta', 'racial_int', 'racial_spi']):
self.assertEqual(getattr(self.race, stat), racial_stats[i])
racial_stats = (288 - 4, 306 + 4, 212, 169, 127)
night_elf = race.Race('night_elf')
for i, stat in enumerate(['racial_str', 'racial_agi', 'racial_sta', 'racial_int', 'racial_spi']):
self.assertEqual(getattr(night_elf, stat), racial_stats[i])
def test_get_racial_crit(self):
for weapon in ('thrown', 'gun', 'bow'):
self.assertEqual(self.race.get_racial_crit(weapon), 0)
troll = race.Race('troll')
self.assertEqual(troll.get_racial_crit(), 0)
worgen = race.Race('worgen')
self.assertEqual(worgen.get_racial_crit(), 0.01)
        self.assertEqual(worgen.get_racial_crit('gun'), 0.01)
self.assertEqual(worgen.get_racial_crit('axe'), 0.01)
def test_get_racial_haste(self):
self.assertEqual(self.race.get_racial_haste(), 0)
goblin = race.Race('goblin')
self.assertEqual(goblin.get_racial_haste(), 0.01)
    def test_get_racial_stat_boosts(self):
self.assertEqual(len(self.race.get_racial_stat_boosts()), 0)
orc = race.Race('orc')
orc.level = 110;
abilities = orc.get_racial_stat_boosts()
self.assertEqual(len(abilities), 2)
self.assertEqual(abilities[0]['duration'], 15)
self.assertTrue(abilities[1]['stat'] in ('ap', 'sp'))
self.assertNotEqual(abilities[0]['stat'],abilities[1]['stat'])
if (abilities[0]['stat'] == 'ap'):
self.assertEqual(abilities[0]['value'], 2243)
else:
self.assertEqual(abilities[0]['value'], 585)
def test_goblin_racial(self):
goblin = race.Race('goblin')
goblin.level = 80
self.assertTrue(goblin.rocket_barrage)
self.assertAlmostEqual(goblin.activated_racial_data['rocket_barrage']['value'](goblin, 10, 10, 10), 172.8093)
lovekun/Notebook | python/chatroomServer.py | Python | gpl-2.0 | 654 | 0.003058
import socket
import threading
import time
def tcplink(sock, addr):
print 'Accept new connection from %s:%s...' % addr
sock.send('Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if data == 'exit' or not data:
break
        sock.send('Hello, %s!' % data)
sock.close()
print 'Connection from %s:%s closed.' % addr
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 8888))
s.listen(5)
print 'Waiting for connection...'
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
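# Illustrative client sketch (added for clarity, not part of the original script).
# The server above blocks in an infinite accept loop, so a client would live in a
# separate file (hypothetical name chatroomClient.py):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 8888))
#   print c.recv(1024)            # 'Welcome!'
#   c.send('Alice')
#   print c.recv(1024)            # 'Hello, Alice!'
#   c.send('exit')
#   c.close()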
ysekky/GPy | GPy/kern/src/multidimensional_integral_limits.py | Python | bsd-3-clause | 6,207 | 0.020622
# Written by Mike Smith michaeltsmith.org.uk
from __future__ import division
import numpy as np
from .kern import Kern
from ...core.parameterization import Param
from paramz.transformations import Logexp
import math
class Multidimensional_Integral_Limits(Kern): #todo do I need to inherit from Stationary
"""
Integral kernel, can include limits on each integral value. This kernel allows an n-dimensional
histogram or binned data to be modelled. The outputs are the counts in each bin. The inputs
are the start and end points of each bin: Pairs of inputs act as the limits on each bin. So
inputs 4 and 5 provide the start and end values of each bin in the 3rd dimension.
The kernel's predictions are the latent function which might have generated those binned results.
"""
def __init__(self, input_dim, variances=None, lengthscale=None, ARD=False, active_dims=None, name='integral'):
super(Multidimensional_Integral_Limits, self).__init__(input_dim, active_dims, name)
if lengthscale is None:
lengthscale = np.ones(1)
else:
lengthscale = np.asarray(lengthscale)
self.lengthscale = Param('lengthscale', lengthscale, Logexp()) #Logexp - transforms to allow positive only values...
self.variances = Param('variances', variances, Logexp()) #and here.
self.link_parameters(self.variances, self.lengthscale) #this just takes a list of parameters we need to optimise.
def h(self, z):
return 0.5 * z * np.sqrt(math.pi) * math.erf(z) + np.exp(-(z**2))
def dk_dl(self, t, tprime, s, sprime, l): #derivative of the kernel wrt lengthscale
return l * ( self.h((t-sprime)/l) - self.h((t - tprime)/l) + self.h((tprime-s)/l) - self.h((s-sprime)/l))
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None: #we're finding dK_xx/dTheta
dK_dl_term = np.zeros([X.shape[0],X.shape[0],self.lengthscale.shape[0]])
k_term = np.zeros([X.shape[0],X.shape[0],self.lengthscale.shape[0]])
dK_dl = np.zeros([X.shape[0],X.shape[0],self.lengthscale.shape[0]])
dK_dv = np.zeros([X.shape[0],X.shape[0]])
for il,l in enumerate(self.lengthscale):
idx = il*2
for i,x in enumerate(X):
for j,x2 in enumerate(X):
dK_dl_term[i,j,il] = self.dk_dl(x[idx],x2[idx],x[idx+1],x2[idx+1],l)
k_term[i,j,il] = self.k_xx(x[idx],x2[idx],x[idx+1],x2[idx+1],l)
for il,l in enumerate(self.lengthscale):
dK_dl = self.variances[0] * dK_dl_term[:,:,il]
for jl, l in enumerate(self.lengthscale):
if jl!=il:
dK_dl *= k_term[:,:,jl]
self.lengthscale.gradient[il] = np.sum(dK_dl * dL_dK)
dK_dv = self.calc_K_xx_wo_variance(X) #the gradient wrt the variance is k_xx.
self.variances.gradient = np.sum(dK_dv * dL_dK)
else: #we're finding dK_xf/Dtheta
raise NotImplementedError("Currently this function only handles finding the gradient of a single vector of inputs (X) not a pair of vectors (X and X2)")
#useful little function to help calculate the covariances.
def g(self,z):
return 1.0 * z * np.sqrt(math.pi) * math.erf(z) + np.exp(-(z**2))
    def k_xx(self,t,tprime,s,sprime,l):
        """Covariance between observed values.
s and t are one domain of the integral (i.e. the integral between s and t)
sprime and tprime are another domain of the integral (i.e. the integral between sprime and tprime)
We're interested in how correlated these two integrals are.
Note: We've not multiplied by the variance, this is done in K."""
        return 0.5 * (l**2) * ( self.g((t-sprime)/l) + self.g((tprime-s)/l) - self.g((t - tprime)/l) - self.g((s-sprime)/l))
def k_ff(self,t,tprime,l):
"""Doesn't need s or sprime as we're looking at the 'derivatives', so no domains over which to integrate are required"""
return np.exp(-((t-tprime)**2)/(l**2)) #rbf
def k_xf(self,t,tprime,s,l):
"""Covariance between the gradient (latent value) and the actual (observed) value.
Note that sprime isn't actually used in this expression, presumably because the 'primes' are the gradient (latent) values which don't
involve an integration, and thus there is no domain over which they're integrated, just a single value that we want."""
return 0.5 * np.sqrt(math.pi) * l * (math.erf((t-tprime)/l) + math.erf((tprime-s)/l))
def calc_K_xx_wo_variance(self,X):
"""Calculates K_xx without the variance term"""
K_xx = np.ones([X.shape[0],X.shape[0]]) #ones now as a product occurs over each dimension
for i,x in enumerate(X):
for j,x2 in enumerate(X):
for il,l in enumerate(self.lengthscale):
idx = il*2 #each pair of input dimensions describe the limits on one actual dimension in the data
K_xx[i,j] *= self.k_xx(x[idx],x2[idx],x[idx+1],x2[idx+1],l)
return K_xx
def K(self, X, X2=None):
if X2 is None: #X vs X
K_xx = self.calc_K_xx_wo_variance(X)
return K_xx * self.variances[0]
else: #X vs X2
K_xf = np.ones([X.shape[0],X2.shape[0]])
for i,x in enumerate(X):
for j,x2 in enumerate(X2):
for il,l in enumerate(self.lengthscale):
idx = il*2
K_xf[i,j] *= self.k_xf(x[idx],x2[idx],x[idx+1],l)
return K_xf * self.variances[0]
def Kdiag(self, X):
"""I've used the fact that we call this method for K_ff when finding the covariance as a hack so
I know if I should return K_ff or K_xx. In this case we're returning K_ff!!
$K_{ff}^{post} = K_{ff} - K_{fx} K_{xx}^{-1} K_{xf}$"""
K_ff = np.ones(X.shape[0])
for i,x in enumerate(X):
for il,l in enumerate(self.lengthscale):
idx = il*2
K_ff[i] *= self.k_ff(x[idx],x[idx],l)
return K_ff * self.variances[0]
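# Illustrative usage sketch (added for clarity, not part of the original kernel file):
# each pair of input columns holds the integration limits for one underlying dimension,
# so input_dim=2 with a single lengthscale models 1-D binned data. Toy numbers only;
# GPy must be installed for the Kern base class and Param to resolve.
def _example_integral_kernel():
    k = Multidimensional_Integral_Limits(input_dim=2, variances=1.0, lengthscale=[2.0])
    X = np.array([[0.0, 1.0], [1.0, 2.0], [0.0, 2.0]])  # columns: bin start, bin end
    return k.K(X)  # 3x3 covariance between the three bins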
QiJune/Paddle | python/paddle/trainer_config_helpers/tests/configs/projections.py | Python | apache-2.0 | 2,317 | 0
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Test mixed layer, projections and operators.
'''
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-4)
din = data_layer(name='test', size=100)
din = embedding_layer(input=din, size=256)
with mixed_layer(size=100) as m1:
m1 += full_matrix_projection(input=din)
with mixed_layer(size=100) as m2:
m2 += table_projection(input=m1)
with mixed_layer(size=100) as m3:
m3 += identity_projection(input=m2)
with mixed_layer(size=100) as m4:
m4 += dotmul_projection(input=m3)
with mixed_layer() as m5:
m5 += context_projection(input=m4, context_len=3)
with mixed_layer() as m6:
m6 += dotmul_operator(a=m3, b=m4)
m6 += scaling_projection(m3)
img = data_layer(name='img', size=32 * 32)
flt = data_layer(name='filter', size=3 * 3 * 1 * 64)
with mixed_layer() as m7:
m7 += conv_operator(
img=img, filter=flt, num_filters=64, num_channels=1, filter_size=3)
    m7 += conv_projection(img, filter_size=3, num_filters=64, num_channels=1)
with mixed_layer() as m8:
m8 += conv_operator(
img=img,
filter=flt,
num_filters=64,
num_channels=1,
filter_size=3,
stride=2,
padding=1,
trans=True)
m8 += conv_projection(
img,
filter_size=3,
num_filters=64,
num_channels=1,
stride=2,
padding=1,
trans=True)
end = mixed_layer(
input=[
full_matrix_projection(input=m5),
trans_full_matrix_projection(input=m6),
full_matrix_projection(input=m7), full_matrix_projection(input=m8)
],
size=100,
layer_attr=ExtraAttr(
drop_rate=0.5, error_clipping_threshold=40))
outputs(end)
jenskutilek/Glyphs-Scripts | Glyphs/DecRO.py | Python | mit | 292 | 0.006849
# MenuTitle: Copy to Background, Decompose, Remove Overlaps, Correct Path Direction
for layer in Glyphs.font.selectedLayers:
g = layer.parent
for l in g.layers:
l.background = l.copy()
l.decomposeComponents()
        l.removeOverlap()
l.correctPathDirection()
paulftw/titan-files | tests/files/dirs_test.py | Python | apache-2.0 | 5,379 | 0.001487
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dirs.py."""
from tests.common import testing
import time
from titan.common.lib.google.apputils import basetest
from titan.files import files
from titan.files import dirs
PATH_WRITE_ACTION = dirs.ModifiedPath.WRITE
PATH_DELETE_ACTION = dirs.ModifiedPath.DELETE
class DirManagingFile(dirs.DirManagerMixin, files.File):
pass
class DirManagerTest(testing.BaseTestCase):
def tearDown(self):
files.UnregisterFileFactory()
super(DirManagerTest, self).tearDown()
def testEndToEnd(self):
files.RegisterFileFactory(lambda *args, **kwargs: DirManagingFile)
# Make the ETA buffer negative so tasks are available instantly for lease.
self.stubs.SmartSet(dirs, 'TASKQUEUE_LEASE_ETA_BUFFER',
-(dirs.TASKQUEUE_LEASE_ETA_BUFFER * 86400))
# Make time.time() return a constant (to guarantee tasks are all created
# in the same window, so they can be processed with a single call).
now = time.time()
self.stubs.Set(dirs.time, 'time', lambda: now)
files.File('/a/b/foo').Write('')
files.File('/a/b/bar').Write('')
files.File('/a/d/foo').Write('')
# Run the consumer (the cron job).
dir_task_consumer = dirs.DirTaskConsumer()
dir_task_consumer.ProcessNextWindow()
# List root dir.
self.assertEqual(dirs.Dirs(['/a']), dirs.Dirs.List('/'))
# List /a.
self.assertEqual(dirs.Dirs(['/a/b', '/a/d']), dirs.Dirs.List('/a/'))
# List /a/b/.
self.assertEqual(dirs.Dirs([]), dirs.Dirs.List('/a/b/'))
# List /fake/dir.
self.assertEqual(dirs.Dirs([]), dirs.Dirs.List('/fake/dir'))
# Test deleting directories.
files.File('/a/d/foo').Delete()
dir_task_consumer = dirs.DirTaskConsumer()
dir_task_consumer.ProcessNextWindow()
# List /a.
self.assertEqual(dirs.Dirs(['/a/b']), dirs.Dirs.List('/a/'))
self.assertEqual(dirs.Dirs(['/a']), dirs.Dirs.List('/'))
# Delete the remaining files and list again.
files.File('/a/b/foo').Delete()
files.File('/a/b/bar').Delete()
dir_task_consumer = dirs.DirTaskConsumer()
dir_task_consumer.ProcessNextWindow()
self.assertEqual(dirs.Dirs([]), dirs.Dirs.List('/'))
def testComputeAffectedDirs(self):
dir_service = dirs.DirService()
# /a/b/foo is written.
modified_path = dirs.ModifiedPath(
'/a/b/foo', modified=0, action=PATH_WRITE_ACTION)
affected_dirs = dir_service.ComputeAffectedDirs([modified_path])
expected_affected_dirs = {
'dirs_with_adds': set(['/a', '/a/b']),
'dirs_with_deletes': set(),
}
self.assertEqual(expected_affected_dirs, affected_dirs)
# /a/b/foo is deleted.
modified_path = dirs.ModifiedPath(
'/a/b/foo', modified=0, action=PATH_DELETE_ACTION)
affected_dirs = dir_service.ComputeAffectedDirs([modified_path])
expected_affected_dirs = {
'dirs_with_adds': set(),
'dirs_with_deletes': set(['/a', '/a/b']),
}
self.assertEqual(expected_affected_dirs, affected_dirs)
# /a/b/foo is added, then deleted -- dirs should exist in only one list.
added_path = dirs.ModifiedPath(
'/a/b/foo', modified=123123.1, action=PATH_WRITE_ACTION)
deleted_path = dirs.ModifiedPath(
'/a/b/foo', modified=123123.2, action=PATH_DELETE_ACTION)
affected_dirs = dir_service.ComputeAffectedDirs([added_path, deleted_path])
expected_affected_dirs = {
'dirs_with_adds': set(),
'dirs_with_deletes': set(['/a', '/a/b']),
}
self.assertEqual(expected_affected_dirs, affected_dirs)
# Test different file paths -- dirs should exist in both lists.
added_path = dirs.ModifiedPath(
'/a/b/foo', modified=123123.1, action=PATH_WRITE_ACTION)
deleted_path = dirs.ModifiedPath(
'/a/b/c/d/bar', modified=123123.2, action=PATH_DELETE_ACTION)
affected_dirs = dir_service.ComputeAffectedDirs([added_path, deleted_path])
expected_affected_dirs = {
'dirs_with_adds': set(['/a', '/a/b']),
'dirs_with_deletes': set(['/a', '/a/b', '/a/b/c', '/a/b/c/d']),
}
    self.assertEqual(expected_affected_dirs, affected_dirs)
# Test chronological ordering, even with out-of-order arguments.
path1 = dirs.ModifiedPath(
'/a/b/foo', modified=123123.0, action=PATH_DELETE_ACTION)
path2 = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.2, action=PATH_WRITE_ACTION)
path3 = dirs.ModifiedPath(
'/a/b/foo', modified=123123.1, action=PATH_DELETE_ACTION)
affected_dirs = dir_service.ComputeAffectedDirs([path1, path2, path3])
expected_affected_dirs = {
'dirs_with_adds': set(['/a', '/a/b']),
'dirs_with_deletes': set(),
}
self.assertEqual(expected_affected_dirs, affected_dirs)
if __name__ == '__main__':
basetest.main()
lockwooddev/django-perseus | django_perseus/renderers/default.py | Python | mit | 2,589 | 0.001159
from django.conf import settings
from django.test.client import Client
from .base import BaseRenderer
from django_perseus.exceptions import RendererException
import logging
import mimetypes
import os
logger = logging.getLogger('perseus')
class DefaultRenderer(BaseRenderer):
def render_path(self, path=None, view=None):
if path:
            # create deploy dir if not exists
            deploy_dir = settings.PERSEUS_SOURCE_DIR
            outpath = os.path.join(deploy_dir, '')
            if not os.path.exists(deploy_dir):
                os.makedirs(deploy_dir)
# create index page
if path == '/':
response, mime = self.render_page(path)
outpath = os.path.join(outpath, 'index{0}'.format(mime))
self.save_page(response, outpath)
return
# strip paths to ready them for mimetyping
if path.startswith('/'):
realpath = path[1:]
if realpath.endswith('/'):
realpath = realpath[:-1]
# split paths to find subdirs
paths = path.split('/')
paths = [p for p in paths if p != '']
# if found more than one, subdirectories exist
if len(paths) > 1:
outdir = os.path.abspath(os.path.join(deploy_dir, *paths[:-1]))
if not os.path.exists(outdir):
os.makedirs(outdir)
response, mime = self.render_page(path)
outpath = os.path.join(outdir, '{0}{1}'.format(paths[-1], mime))
self.save_page(response, outpath)
else:
response, mime = self.render_page(path)
outpath = os.path.join(outpath, '{0}{1}'.format(realpath, mime))
self.save_page(response, outpath)
def render_page(self, path):
response = self.client.get(path)
        if response.status_code != 200:
raise RendererException(
'Path: {0} returns status code: {1}.'.format(path, response.status_code))
return response, self.get_mime(response)
def get_mime(self, response):
mime = response['Content-Type']
encoding = mime.split(';', 1)[0]
return mimetypes.guess_extension(encoding)
def save_page(self, response, outpath):
logger.debug(outpath)
with open(outpath, 'wb') as f:
f.write(response.content)
def generate(self):
self.client = Client()
for path in self.paths():
self.render_path(path=path)
mfcovington/django-lab-members | lab_members/migrations/0013_advisor_url.py | Python | bsd-3-clause | 517 | 0.001934
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
        ('lab_members', '0012_scientist_email'),
]
operations = [
migrations.AddField(
model_name='advisor',
name='url',
            field=models.URLField(help_text="Please enter advisor's website", null=True, blank=True, verbose_name='advisor website'),
preserve_default=True,
),
]
CarlFK/veyepar | dj/main/migrations/0002_auto_20160116_2028.py | Python | mit | 1,005 | 0.002985
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Mark',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('click', models.DateTimeField(help_text=b'When Cut was Clicked.')),
                ('location', models.ForeignKey(to='main.Location')),
('show', models.ForeignKey(to='main.Show')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(default=b'69474658', max_length=32, null=True, help_text=b'key to allow unauthenticated users to edit this item.', blank=True),
            preserve_default=True,
),
]
|
nextgis-extra/tests
|
lib_gdal/ogr/ogr_gpsbabel.py
|
Python
|
gpl-2.0
| 4,660
| 0.00515
|
#!/usr/bin/env python
###############################################################################
# $Id: ogr_gpsbabel.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for OGR GPSBabel driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import gdal
###############################################################################
# Check that dependencies are met
def ogr_gpsbabel_init():
# Test if the gpsbabel is accessible
ogrtest.have_gpsbabel = False
ogrtest.have_read_gpsbabel = False
try:
ret = gdaltest.runexternal('gpsbabel -V')
except:
ret = ''
if ret.find('GPSBabel') == -1:
print('Cannot access GPSBabel utility')
return 'skip'
try:
ds = ogr.Open( 'data/test.gpx' )
except:
ds = None
if ds is None:
print('GPX driver not configured for read support')
else:
ogrtest.have_read_gpsbabel = True
ogrtest.have_gpsbabel = True
return 'success'
###############################################################################
# Test reading with explicit subdriver
def ogr_gpsbabel_1():
if not ogrtest.have_read_gpsbabel:
return 'skip'
ds = ogr.Open('GPSBabel:nmea:data/nmea.txt')
if ds is None:
return 'fail'
if ds.GetLayerCount() != 2:
return 'fail'
return 'success'
###############################################################################
# Test reading with implicit subdriver
def ogr_gpsbabel_2():
if not ogrtest.have_read_gpsbabel:
return 'skip'
ds = ogr.Open('data/nmea.txt')
if ds is None:
return 'fail'
if ds.GetLayerCount() != 2:
return 'fail'
return 'success'
###############################################################################
# Test writing
def ogr_gpsbabel_3():
if not ogrtest.have_gpsbabel:
return 'skip'
ds = ogr.GetDriverByName('GPSBabel').CreateDataSource('GPSBabel:nmea:tmp/nmea.txt')
lyr = ds.CreateLayer('track_points', geom_type = ogr.wkbPoint)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('track_fid', 0)
feat.SetField('track_seg_id', 0)
feat.SetField('track_name', 'TRACK_NAME')
feat.SetField('name', 'PT_NAME')
feat.SetField('hdop', 123)
feat.SetField('vdop', 456)
feat.SetField('pdop', 789)
feat.SetField('sat', 6)
feat.SetField('time', '2010/06/03 12:34:56')
feat.SetField('fix', '3d')
geom = ogr.CreateGeometryFromWkt('POINT(2.50 49.25)')
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = None
|
    lyr = None
ds = None
f = open('tmp/nmea.txt', 'rt')
res = f.read()
f.close()
gdal.Unlink('tmp/nmea.txt')
if res.find('$GPRMC') == -1 or \
res.find('$GPGGA') == -1 or \
res.find('$GPGSA') == -1:
gdaltest.post_reason('did not get expected result')
print(res)
return 'fail'
return 'success'
gdaltest_list = [
ogr_gpsbabel_init,
ogr_gpsbabel_1,
ogr_gpsbabel_2,
ogr_gpsbabel_3 ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_gpsbabel' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
JQIamo/artiq
|
artiq/test/lit/interleaving/error_inlining.py
|
Python
|
lgpl-3.0
| 447
| 0.008949
|
# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
def f():
delay_mu(2)
def g():
delay_mu(2)
x = f if True else g
def h():
with interleave:
f()
        # CHECK-L: ${LINE:+1}: fatal: it is not possible to interleave this function call within a 'with interleave:' statement because the compiler could not prove that the same function would always be called
x()
|
malishevg/edugraph
|
lms/djangoapps/django_comment_client/helpers.py
|
Python
|
agpl-3.0
| 926
| 0.007559
|
from django.conf import settings
from mako.template import Template
import os
def include_mustache_templates():
mustache_dir = settings.PROJECT_ROOT / 'templates' / 'discussion' / 'mustache'
def is_valid_file_name(file_name):
return file_name.endswith('.mustache')
def read_file(file_name):
return open(mustache_dir / file_name, "r").read().decode('utf-8')
def template_id_from_file_name(file_name):
return file_name.rpartition('.')[0]
def process_mako(template_content):
        return Template(template_content).render_unicode()
def make_script_tag(id, content):
return u"<script type='text/template' id='{0}'>{1}</script>".format(id, content)
return u'\n'.join(
make_script_tag(template_id_from_file_name(file_name), process_mako(read_file(file_name)))
for file_name in os.listdir(mustache_dir)
        if is_valid_file_name(file_name)
)
|
knuu/competitive-programming
|
hackerrank/algorithm/two_arrays.py
|
Python
|
mit
| 221
| 0.004525
|
for _ in range(int(input())):
|
N, K = map(int, input().split())
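    # Greedy check: sort one array ascending and the other descending, then pair them
    # element-wise; if every pair sums to at least K, a valid permutation exists.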
print("YES" if all(a + b >= K for a, b in zip(sorted(int(x) for x in input().split())
|
, reversed(sorted(int(x) for x in input().split())))) else "NO")
|
fladi/drf-haystack
|
drf_haystack/serializers.py
|
Python
|
mit
| 10,695
| 0.002525
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import copy
import warnings
from itertools import chain
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from haystack import fields as haystack_fields
from haystack.query import EmptySearchQuerySet
from haystack.utils import Highlighter
from rest_framework import serializers
from rest_framework.compat import OrderedDict
from rest_framework.fields import empty
from rest_framework.utils.field_mapping import ClassLookupDict, get_field_kwargs
from .fields import (
HaystackBooleanField, HaystackCharField, HaystackDateField, HaystackDateTimeField,
HaystackDecimalField, HaystackFloatField, HaystackIntegerField
)
class HaystackSerializer(serializers.Serializer):
"""
A `HaystackSerializer` which populates fields based on
which models that are available in the SearchQueryset.
"""
_field_mapping = ClassLookupDict({
haystack_fields.BooleanField: HaystackBooleanField,
haystack_fields.CharField: HaystackCharField,
haystack_fields.DateField: HaystackDateField,
haystack_fields.DateTimeField: HaystackDateTimeField,
haystack_fields.DecimalField: HaystackDecimalField,
haystack_fields.EdgeNgramField: HaystackCharField,
haystack_fields.FacetBooleanField: HaystackBooleanField,
haystack_fields.FacetCharField: HaystackCharField,
haystack_fields.FacetDateField: HaystackDateField,
haystack_fields.FacetDateTimeField: HaystackDateTimeField,
haystack_fields.FacetDecimalField: HaystackDecimalField,
haystack_fields.FacetFloatField: HaystackFloatField,
haystack_fields.FacetIntegerField: HaystackIntegerField,
haystack_fields.FacetMultiValueField: HaystackCharField,
haystack_fields.FloatField: HaystackFloatField,
haystack_fields.IntegerField: HaystackIntegerField,
haystack_fields.LocationField: HaystackCharField,
haystack_fields.MultiValueField: HaystackCharField,
haystack_fields.NgramField: HaystackCharField,
})
def __init__(self, instance=None, data=empty, **kwargs):
super(HaystackSerializer, self).__init__(instance, data, **kwargs)
try:
if not hasattr(self.Meta, "index_classes") and not hasattr(self.Meta, "serializers"):
                raise ImproperlyConfigured("You must set either the 'index_classes' or 'serializers' "
                                           "attribute on the serializer Meta class.")
|
except AttributeError:
raise ImproperlyConfigured("%s must implement a Meta class." % self.__class__.__name__)
if not self.instance:
self.instance = EmptySearchQuerySet()
@staticmethod
def _get_default_field_kwargs(model, field):
"""
Get the required attributes from the model field in order
to instantiate a REST Framework serializer field.
"""
kwargs = {}
if field.model_attr in model._meta.get_all_field_names():
model_field = model._meta.get_field_by_name(field.model_attr)[0]
kwargs = get_field_kwargs(field.model_attr, model_field)
# Remove stuff we don't care about!
delete_attrs = [
"allow_blank",
"choices",
"model_field",
]
for attr in delete_attrs:
if attr in kwargs:
del kwargs[attr]
return kwargs
def get_fields(self):
"""
Get the required fields for serializing the result.
"""
fields = getattr(self.Meta, "fields", [])
exclude = getattr(self.Meta, "exclude", [])
if fields and exclude:
raise ImproperlyConfigured("Cannot set both `fields` and `exclude`.")
ignore_fields = getattr(self.Meta, "ignore_fields", [])
indices = getattr(self.Meta, "index_classes")
declared_fields = copy.deepcopy(self._declared_fields)
prefix_field_names = len(indices) > 1
field_mapping = OrderedDict()
# overlapping fields on multiple indices is supported by internally prefixing the field
# names with the index class to which they belong or, optionally, a user-provided alias
# for the index.
for index_cls in self.Meta.index_classes:
prefix = ""
if prefix_field_names:
prefix = "_%s__" % self._get_index_class_name(index_cls)
for field_name, field_type in six.iteritems(index_cls.fields):
orig_name = field_name
field_name = "%s%s" % (prefix, field_name)
# This has become a little more complex, but provides convenient flexibility for users
if not exclude:
if orig_name not in fields and field_name not in fields:
continue
elif orig_name in exclude or field_name in exclude or orig_name in ignore_fields or field_name in ignore_fields:
continue
# Look up the field attributes on the current index model,
# in order to correctly instantiate the serializer field.
model = index_cls().get_model()
kwargs = self._get_default_field_kwargs(model, field_type)
kwargs['prefix_field_names'] = prefix_field_names
field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
# Add any explicitly declared fields. They *will* override any index fields
# in case of naming collision!.
if declared_fields:
for field_name in declared_fields:
if field_name in field_mapping:
warnings.warn("Field '{field}' already exists in the field list. This *will* "
"overwrite existing field '{field}'".format(field=field_name))
field_mapping[field_name] = declared_fields[field_name]
return field_mapping
def to_representation(self, instance):
"""
If we have a serializer mapping, use that. Otherwise, use standard serializer behavior
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result.
"""
if getattr(self.Meta, "serializers", None):
ret = self.multi_serializer_representation(instance)
else:
ret = super(HaystackSerializer, self).to_representation(instance)
prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
current_index = self._get_index_class_name(type(instance.searchindex))
for field in self.fields.keys():
orig_field = field
if prefix_field_names:
parts = field.split("__")
if len(parts) > 1:
index = parts[0][1:] # trim the preceding '_'
field = parts[1]
if index == current_index:
ret[field] = ret[orig_field]
del ret[orig_field]
elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
del ret[orig_field]
# include the highlighted field in either case
if getattr(instance, "highlighted", None):
ret["highlighted"] = instance.highlighted[0]
return ret
def multi_serializer_representation(self, instance):
serializers = self.Meta.serializers
index = instance.searchindex
serializer_class = serializers.get(type(index), None)
if not serializer_class:
raise ImproperlyConfigured("Could not find serializer for %s in mapping" % index)
return serializer_class(context=self._context).to_representation(instance)
def _get_index_class_name(self, index_cls):
"""
Converts in index model class to a name suitable for use as a field name prefix. A user
may option
|
alexhersh/calico
|
calico/felix/test/__init__.py
|
Python
|
apache-2.0
| 659
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
_log = logging.getLogger(__name__)
|
zalax303/test_django
|
myforum/article/models.py
|
Python
|
apache-2.0
| 786
| 0.01752
|
# coding:utf-8
from django.contrib.auth.models import User
from django.db import models
from block.models import Block
# Create your models here.
class Article(models.Model):
block = models.ForeignKey(Block, verbose_name=u"所属板块")
owner = models.ForeignKey(User, verbose_name=u"作者")
title = models.CharField(verbose_name=u"标题", max_length=100)
content = models.CharField(verbose_name=u"内容", max_length=10000)
status = models.IntegerField(verbose_name=u"状态", choices=((0, u"普通"), (-1, u"剔除"), (10, u"精华")), default=0)
create_time = models.DateTimeField(auto_now_add=True)
update_time = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.title
class Meta:
verbose_name = u"文章"
verbose_name_plural = u"文章"
|
TonyApuzzo/fuzzyjoin
|
fuzzyjoin-hadoop/src/test/scripts/plot/timeline.py
|
Python
|
apache-2.0
| 2,845
| 0.027768
|
#!/usr/bin/env python
#
# Copyright 2010-2011 The Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.  You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Author: Rares Vernica <rares (at) ics.uci.edu>
from pychart import *
import os
import sys
fname_in = sys.argv[1]
fname_out = fname_in[:fname_in.rindex('.')] + '.eps'
x_max = y_max = x_grid_interval = y_grid_interval = None
if len(sys.argv) > 2:
x_max = int(sys.argv[2])
x_grid_interval = x_max / 3
if len(sys.argv) > 3:
y_max = int(sys.argv[3])
y_grid_interval = y_max / 10
###
### Data
###
### time maps shuffle merge reduce waste
data = chart_data.read_csv(fname_in, '%f %d %d %d %d %d')
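# Turn the per-phase task counts into cumulative boundaries (maps, maps+shuffle, ...),
# so each phase can be drawn as a stacked band by the range plots below.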
data = chart_data.transform(
lambda x: [
x[0], 0, x[1], sum(x[1:3]), sum(x[1:4]), sum(x[1:5]), sum(x[1:6])],
data)
max = 50
if len(data) > max * 2:
m = len(data) / max
data = chart_data.filter(lambda x: x[0] % m == 0, data)
###
### Plot
###
theme.get_options()
theme.scale_factor = 3
loc = (0, 0)
loc = (-60, 0)
loc_legend = (-40, -80)
theme.reinitialize()
can = canvas.init(fname_out)
d = '_'.join(os.getcwd().split('/')[-6:] + [fname_out[:fname_out.rfind('.')]])
ar = area.T(
x_axis = axis.X(label = 'Time (seconds)\n' + d, format = '/a-30{}%d'),
x_range = (0, x_max),
x_grid_interval = x_grid_interval,
y_axis = axis.Y(label = '# Tasks', format = '%d'),
y_range = (0, y_max),
y_grid_interval = y_grid_interval,
loc = loc,
legend = legend.T(loc = loc_legend))
colors = [ fill_style.white, fill_style.gray90, fill_style.diag, fill_style.black, fill_style.rdiag3 ]
ar.add_plot(range_plot.T(
label = 'maps',
data = data,
fill_style = colors[0]))
ar.add_plot(range_plot.T(
label = 'shuffle',
data = data,
min_col = 2,
max_col = 3,
fill_style = colors[1]))
ar.add_plot(range_plot.T(
label = 'merge',
data = data,
min_col = 3,
max_col = 4,
fill_style = colors[2]))
ar.add_plot(range_plot.T(
label = 'reduce',
data = data,
min_col = 4,
max_col = 5,
fill_style = colors[3]))
# ar.add_plot(range_plot.T(
# label = 'waste',
# data = data,
# min_col = 5,
# max_col = 6,
# fill_style = colors[4]))
ar.draw()
can.close()
print fname_out, 'wrote'
|
mineo/picard
|
picard/ui/infodialog.py
|
Python
|
gpl-2.0
| 15,013
| 0.001666
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import namedtuple
import os.path
import re
import traceback
from PyQt5 import (
QtCore,
QtGui,
QtWidgets,
)
from picard import log
from picard.album import Album
from picard.coverart.image import CoverArtImageIOError
from picard.file import File
from picard.track import Track
from picard.util import (
bytes2human,
encode_filename,
format_time,
htmlescape,
union_sorted_lists,
webbrowser2,
)
from picard.ui import PicardDialog
from picard.ui.ui_infodialog import Ui_InfoDialog
from picard.ui.util import StandardButton
class ArtworkCoverWidget(QtWidgets.QWidget):
"""A QWidget that can be added to artwork column cell of ArtworkTable."""
SIZE = 170
def __init__(self, pixmap=None, text=None, parent=None):
super().__init__(parent=parent)
layout = QtWidgets.QVBoxLayout()
if pixmap is not None:
image_label = QtWidgets.QLabel()
image_label.setPixmap(pixmap.scaled(self.SIZE, self.SIZE,
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation))
image_label.setAlignment(QtCore.Qt.AlignCenter)
layout.addWidget(image_label)
if text is not None:
text_label = QtWidgets.QLabel()
text_label.setText(text)
text_label.setAlignment(QtCore.Qt.AlignCenter)
text_label.setWordWrap(True)
layout.addWidget(text_label)
self.setLayout(layout)
class ArtworkTable(QtWidgets.QTableWidget):
def __init__(self, display_existing_art):
super().__init__(0, 2)
self.display_existing_art = display_existing_art
h_header = self.horizontalHeader()
v_header = self.verticalHeader()
h_header.setDefaultSectionSize(200)
v_header.setDefaultSectionSize(230)
if self.display_existing_art:
self._existing_cover_col = 0
self._type_col = 1
self._new_cover_col = 2
self.insertColumn(2)
self.setHorizontalHeaderLabels([_("Existing Cover"), _("Type"),
_("New Cover")])
else:
self._type_col = 0
self._new_cover_col = 1
self.setHorizontalHeaderLabels([_("Type"), _("Cover")])
self.setColumnWidth(self._type_col, 140)
class InfoDialog(PicardDialog):
defaultsize = QtCore.QSize(665, 436)
autorestore = False
def __init__(self, obj, parent=None):
super().__init__(parent)
self.obj = obj
self.images = []
self.existing_images = []
self.ui = Ui_InfoDialog()
self.display_existing_artwork = False
if (isinstance(obj, File) and
isinstance(obj.parent, Track) or
isinstance(obj, Track) or
(isinstance(obj, Album) and obj.get_num_total_files() > 0)):
# Display existing artwork only if selected object is track object
# or linked to a track object or it's an album with files
if (getattr(obj, 'orig_metadata', None) is not None and
obj.orig_metadata.images and
obj.orig_metadata.images != obj.metadata.images):
self.display_existing_artwork = True
                self.existing_images = obj.orig_metadata.images
if obj.metadata.images:
self.images = obj.metadata.images
if not self.images and self.existing_images:
self.images = self.existing_images
self.existing_images = []
|
self.display_existing_artwork = False
self.ui.setupUi(self)
self.ui.buttonBox.addButton(
StandardButton(StandardButton.CLOSE), QtWidgets.QDialogButtonBox.AcceptRole)
self.ui.buttonBox.accepted.connect(self.accept)
# Add the ArtworkTable to the ui
self.ui.artwork_table = ArtworkTable(self.display_existing_artwork)
self.ui.artwork_table.setObjectName("artwork_table")
self.ui.vboxlayout1.addWidget(self.ui.artwork_table)
self.setTabOrder(self.ui.tabWidget, self.ui.artwork_table)
self.setTabOrder(self.ui.artwork_table, self.ui.buttonBox)
self.setWindowTitle(_("Info"))
self.artwork_table = self.ui.artwork_table
self._display_tabs()
self.restore_geometry()
def _display_tabs(self):
self._display_info_tab()
self._display_artwork_tab()
def _display_artwork(self, images, col):
"""Draw artwork in corresponding cell if image type matches type in Type column.
Arguments:
images -- The images to be drawn.
col -- Column in which images are to be drawn. Can be _new_cover_col or _existing_cover_col.
"""
row = 0
row_count = self.artwork_table.rowCount()
for image in images:
while row != row_count:
image_type = self.artwork_table.item(row, self.artwork_table._type_col)
if image_type and image_type.data(QtCore.Qt.UserRole) == image.types_as_string():
break
row += 1
if row == row_count:
continue
data = None
try:
if image.thumbnail:
try:
data = image.thumbnail.data
except CoverArtImageIOError as e:
log.warning(e)
else:
data = image.data
except CoverArtImageIOError:
log.error(traceback.format_exc())
continue
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.UserRole, image)
pixmap = QtGui.QPixmap()
if data is not None:
pixmap.loadFromData(data)
item.setToolTip(
_("Double-click to open in external viewer\n"
"Temporary file: %s\n"
"Source: %s") % (image.tempfile_filename, image.source))
infos = []
if image.comment:
infos.append(image.comment)
infos.append("%s (%s)" %
(bytes2human.decimal(image.datalength),
bytes2human.binary(image.datalength)))
if image.width and image.height:
infos.append("%d x %d" % (image.width, image.height))
infos.append(image.mimetype)
img_wgt = ArtworkCoverWidget(pixmap=pixmap, text="\n".join(infos))
self.artwork_table.setCellWidget(row, col, img_wgt)
self.artwork_table.setItem(row, col, item)
row += 1
def _display_artwork_type(self):
"""Display image type in Type column.
If both existing covers and new covers are to be displayed, take union of both cover types list.
"""
types = [image.types_as_string() for image in self.images]
if self.display_existing_artwork:
existing_types = [image.types_as_string() for image in self.existing_images]
# Merge both types and existing types list in sorted order.
types = union_sorted_lists(types, existing_types)
pixmap_arrow = QtGui.QPixmap(":/images/arrow.png")
el
|
wangyixiaohuihui/spark2-annotation
|
python/pyspark/sql/utils.py
|
Python
|
apache-2.0
| 4,112
| 0.001946
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
    Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happened in JVM, the result will be Java exception object, it raise
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
|
autotest/virt-test
|
shared/scripts/dd.py
|
Python
|
gpl-2.0
| 310
| 0
|
import sys
import os
if len(sys.argv) != 3:
    print "Usage: %s path size" % sys.argv[0]
    sys.exit(1)
path = sys.argv[1]
size = int(sys.argv[2])
if not os.path.isdir(os.path.dirname(path)):
os.mkdir(os.path.dirname(path))
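# Create a (typically sparse) file of roughly `size` MiB by seeking past that offset and writing one byte.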
writefile = open(path, 'w')
writefile.seek(1024 * 1024 * size)
writefile.write('\x00')
|
writefile.close()
|
artefactual/archivematica-storage-service
|
storage_service/locations/models/space.py
|
Python
|
agpl-3.0
| 35,298
| 0.00187
|
# stdlib, alphabetical
from __future__ import absolute_import
import datetime
import errno
import logging
import os
import re
import shutil
import stat
import subprocess
import tempfile
# Core Django, alphabetical
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import six
# Third party dependencies, alphabetical
import scandir
from django_extensions.db.fields import UUIDField
# This project, alphabetical
from common import utils
LOGGER = logging.getLogger(__name__)
# This module, alphabetical
from . import StorageException # noqa: E402
__all__ = ("Space", "PosixMoveUnsupportedError")
def validate_space_path(path):
""" Validation for path in Space. Must be absolute. """
if path[0] != "/":
raise ValidationError(_("Path must begin with a /"))
# To add a new storage space the following places must be updated:
# locations/models/space.py (this file)
# Add constant for storage protocol
# Add constant to ACCESS_PROTOCOL_CHOICES
# locations/models/<spacename>.py
# Add class for protocol-specific fields using template below
# locations/models/__init__.py
# Add class to import list
# locations/forms.py
# Add ModelForm for new class
# common/constants.py
# Add entry to protocol
# 'model' is the model object
# 'form' is the ModelForm for creating the space
# 'fields' is a allowlist of fields to display to the user
# locations/migrations/####_<spacename>.py
# Run `manage.py makemigrations locations` to create a migration.
# Rename the migration after the feature. Eg. 0005_auto_20160331_1337.py -> 0005_dspace.py
# locations/tests/test_<spacename>.py
# Add class for tests. Example template below
# class Example(models.Model):
# space = models.OneToOneField('Space', to_field='uuid')
#
# class Meta:
# verbose_name = "Example Space"
# app_label = 'locations'
#
# ALLOWED_LOCATION_PURPOSE = [
# Location.AIP_RECOVERY,
# Location.AIP_STORAGE,
# Location.CURRENTLY_PROCESSING,
# Location.DIP_STORAGE,
# Location.STORAGE_SERVICE_INTERNAL,
# Location.TRANSFER_SOURCE,
# Location.BACKLOG,
# ]
#
# def browse(self, path):
# pass
#
# def delete_path(self, delete_path):
# pass
#
# def move_to_storage_service(self, src_path, dest_path, dest_space):
# """ Moves src_path to dest_space.staging_path/dest_path. """
# pass
#
# def move_from_storage_service(self, source_path, destination_path, package=None):
# """ Moves self.staging_path/src_path to dest_path. """
# pass
# from django.test import TestCase
# import vcr
#
# from locations import models
#
# THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# FIXTURES_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', 'fixtures'))
#
# class TestExample(TestCase):
#
# fixtures = ['base.json', 'example.json']
#
# def setUp(self):
# self.example_object = models.Example.objects.all()[0]
#
# @vcr.use_cassette(os.path.join(FIXTURES_DIR, 'vcr_cassettes', 'example_browse.yaml'))
# def test_browse(self):
# pass
#
# @vcr.use_cassette(os.path.join(FIXTURES_DIR, 'vcr_cassettes', 'example_delete.yaml'))
# def test_delete(self):
# pass
#
# @vcr.use_cassette(os.path.join(FIXTURES_DIR, 'vcr_cassettes', 'example_move_from_ss.yaml'))
# def test_move_from_ss(self):
# pass
#
# @vcr.use_cassette(os.path.join(FIXTURES_DIR, 'vcr_cassettes', 'example_move_to_ss.yaml'))
# def test_move_to_ss(self):
# pass
@six.python_2_unicode_compatible
class Space(models.Model):
"""Common storage space information.
Knows what protocol to use to access a storage space, but all protocol
specific information is in children classes with ForeignKeys to Space."""
uuid = UUIDField(
editable=False, unique=True, version=4, help_text=_("Unique identifier")
)
# Max length 8 (see access_protocol definition)
ARKIVUM = "ARKIVUM"
DATAVERSE = "DV"
DURACLOUD = "DC"
DSPACE = "DSPACE"
DSPACE_REST = "DSPC_RST"
FEDORA = "FEDORA"
LOCAL_FILESYSTEM = "FS"
LOM = "LOM"
NFS = "NFS"
OFFLINE_REPLICA_STAGING = "REPLICA"
PIPELINE_LOCAL_FS = "PIPE_FS"
SWIFT = "SWIFT"
GPG = "GPG"
S3 = "S3"
# These will not be displayed in the Space Create GUI (see locations/forms.py)
BETA_PROTOCOLS = {}
    OBJECT_STORAGE = {DATAVERSE, DSPACE, DSPACE_REST, DURACLOUD, SWIFT, S3}
ACCESS_PROTOCOL_CHOICES = (
(ARKIVUM, _("Arkivum")),
(DATAVERSE, _("Dataverse")),
(DURACLOUD, _("DuraCloud")),
(DSPACE, _("DSpace via SWORD2 API")),
(DSPACE_REST, _("DSpace via REST API")),
(FEDORA, _("FEDORA via SWORD2")),
(GPG, _("GPG encryption on Local Filesystem")),
(LOCAL_FILESYSTEM, _("Local Filesystem")),
(LOM, _("LOCKSS-o-matic")),
(NFS, _("NFS")),
(OFFLINE_REPLICA_STAGING, _("Write-Only Replica Staging on Local Filesystem")),
(PIPELINE_LOCAL_FS, _("Pipeline Local Filesystem")),
(SWIFT, _("Swift")),
(S3, _("S3")),
)
access_protocol = models.CharField(
max_length=8,
choices=ACCESS_PROTOCOL_CHOICES,
verbose_name=_("Access protocol"),
help_text=_("How the space can be accessed."),
)
size = models.BigIntegerField(
default=None,
null=True,
blank=True,
verbose_name=_("Size"),
help_text=_("Size in bytes (optional)"),
)
used = models.BigIntegerField(
default=0, verbose_name=_("Used"), help_text=_("Amount used in bytes")
)
path = models.TextField(
default="",
blank=True,
verbose_name=_("Path"),
help_text=_("Absolute path to the space on the storage service machine."),
)
staging_path = models.TextField(
validators=[validate_space_path],
verbose_name=_("Staging path"),
help_text=_(
"Absolute path to a staging area. Must be UNIX filesystem compatible, preferably on the same filesystem as the path."
),
)
verified = models.BooleanField(
default=False,
verbose_name=_("Verified"),
help_text=_("Whether or not the space has been verified to be accessible."),
)
last_verified = models.DateTimeField(
default=None,
null=True,
blank=True,
verbose_name=_("Last verified"),
help_text=_("Time this location was last verified to be accessible."),
)
class Meta:
verbose_name = _("Space")
app_label = "locations"
def __str__(self):
        return six.text_type("{uuid}: {path} ({access_protocol})").format(
uuid=self.uuid,
access_protocol=self.get_access_protocol_display(),
path=self.path,
)
def clean(self):
# Object storage spaces do not require a path, or for it to start with /
if self.access_protocol not in self.OBJECT_STORAGE:
if not self.path:
raise ValidationError(_("Path is required"))
validate_space_path(self.path)
def get_child_space(self):
""" Returns the protocol-specific space object. """
# Importing PROTOCOL here because importing locations.constants at the
# top of the file causes a circular dependency
from ..constants import PROTOCOL
protocol_model = PROTOCOL[self.access_protocol]["model"]
protocol_space = protocol_model.objects.get(space=self)
# TODO try-catch AttributeError if remote_user or remote_name not exist?
return protocol_space
def browse(self, path, *args, **kwargs):
"""
Return information about the objects (files, directories) at `path`.
Attempts to call the child space's implementation. If not found, falls
back to looking for the path locally.
Returns a dictionary with keys 'entries', 'directories' and 'properties'.
'entries' is a list of strings, one for each entry in that directory, both fi
|
akesandgren/easybuild-framework
|
test/framework/module_generator.py
|
Python
|
gpl-2.0
| 69,283
| 0.003464
|
##
# Copyright 2012-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for module_generator.py.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import sys
import tempfile
from distutils.version import LooseVersion
from unittest import TextTestRunner, TestSuite
from easybuild.framework.easyconfig.tools import process_easyconfig
from easybuild.tools import config
from easybuild.tools.filetools import mkdir, read_file, remove_file, write_file
from easybuild.tools.module_generator import ModuleGeneratorLua, ModuleGeneratorTcl, dependencies_for
from easybuild.tools.module_naming_scheme.utilities import is_valid_module_name
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig.easyconfig import EasyConfig, ActiveMNS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import EnvironmentModulesC, EnvironmentModulesTcl, Lmod
from easybuild.tools.utilities import quote_str
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, find_full_path, init_config
class ModuleGeneratorTest(EnhancedTestCase):
"""Tests for module_generator module."""
MODULE_GENERATOR_CLASS = None
def setUp(self):
"""Test setup."""
super(ModuleGeneratorTest, self).setUp()
# find .eb file
topdir = os.path.dirname(os.path.abspath(__file__))
        eb_path = os.path.join(topdir, 'easyconfigs', 'test_ecs', 'g', 'gzip', 'gzip-1.4.eb')
        eb_full_path = find_full_path(eb_path)
self.assertTrue(eb_full_path)
ec = EasyConfig(eb_full_path)
self.eb = EasyBlock(ec)
self.modgen = self.MODULE_GENERATOR_CLASS(self.eb)
self.modgen.app.installdir = tempfile.mkdtemp(prefix='easybuild-modgen-test-')
self.orig_module_naming_scheme = config.get_module_naming_scheme()
def test_descr(self):
"""Test generation of module description (which includes '#%Module' header)."""
descr = "gzip (GNU zip) is a popular data compression program as a replacement for compress"
homepage = "http://www.gzip.org/"
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
"proc ModulesHelp { } {",
" puts stderr {",
'',
'Description',
'===========',
"%s" % descr,
'',
'',
"More information",
"================",
" - Homepage: %s" % homepage,
" }",
"}",
'',
"module-whatis {Description: %s}" % descr,
"module-whatis {Homepage: %s}" % homepage,
"module-whatis {URL: %s}" % homepage,
'',
"set root %s" % self.modgen.app.installdir,
'',
"conflict gzip",
'',
])
else:
expected = '\n'.join([
"help([==[",
'',
'Description',
'===========',
"%s" % descr,
'',
'',
"More information",
"================",
" - Homepage: %s" % homepage,
']==])',
'',
"whatis([==[Description: %s]==])" % descr,
"whatis([==[Homepage: %s]==])" % homepage,
"whatis([==[URL: %s]==])" % homepage,
'',
'local root = "%s"' % self.modgen.app.installdir,
'',
'conflict("gzip")',
'',
])
desc = self.modgen.get_description()
self.assertEqual(desc, expected)
# Test description with list of 'whatis' strings
self.eb.cfg['whatis'] = ['foo', 'bar']
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
expected = '\n'.join([
"proc ModulesHelp { } {",
" puts stderr {",
'',
'Description',
'===========',
"%s" % descr,
'',
'',
"More information",
"================",
" - Homepage: %s" % homepage,
" }",
"}",
'',
"module-whatis {foo}",
"module-whatis {bar}",
'',
"set root %s" % self.modgen.app.installdir,
'',
"conflict gzip",
'',
])
else:
expected = '\n'.join([
"help([==[",
'',
'Description',
'===========',
"%s" % descr,
'',
'',
"More information",
"================",
" - Homepage: %s" % homepage,
']==])',
'',
"whatis([==[foo]==])",
"whatis([==[bar]==])",
'',
'local root = "%s"' % self.modgen.app.installdir,
'',
'conflict("gzip")',
'',
])
desc = self.modgen.get_description()
self.assertEqual(desc, expected)
def test_set_default_module(self):
"""
Test load part in generated module file.
"""
# note: the lua modulefiles are only supported by Lmod. Therefore,
# skipping when it is not the case
if self.MODULE_GENERATOR_CLASS == ModuleGeneratorLua and not isinstance(self.modtool, Lmod):
return
# creating base path
base_path = os.path.join(self.test_prefix, 'all')
mkdir(base_path)
# creating package module
module_name = 'foobar_mod'
modules_base_path = os.path.join(base_path, module_name)
mkdir(modules_base_path)
# creating two empty modules
txt = self.modgen.MODULE_SHEBANG
if txt:
txt += '\n'
txt += self.modgen.get_description()
txt += self.modgen.set_environment('foo', 'bar')
version_one = '1.0'
version_one_path = os.path.join(modules_base_path, version_one + self.modgen.MODULE_FILE_EXTENSION)
write_file(version_one_path, txt)
version_two = '2.0'
version_two_path = os.path.join(modules_base_path, version_two + self.modgen.MODULE_FILE_EXTENSION)
write_file(version_two_path, txt)
# using base_path to possible module load
self.modtool.use(base_path)
# setting foo version as default
self.modgen.set_as_default(modules_base_path, version_one)
self.modtool.load([module_name])
full_module_name = module_name + '/' + version_one
self.assertTrue(full_module_name in self.modtool.loaded_modules())
self.modtool.purge()
# setting bar version as default
self.modgen.set_as_default(modules_base_path, version_two)
self.modtool.load([m
|
openqt/algorithms
|
projecteuler/pe489-common-factors-between-two-sequences.py
|
Python
|
gpl-3.0
| 506
| 0.018182
|
#!/usr/bin/env python
# coding=utf-8
"""
|
489. Common factors between two sequences
https://projecteuler.net/problem=489
Let G(a, b) be the smallest non-negative integer n for which gcd(n^3 + b, (n + a)^3 + b) is maximized.
For example, G(1, 1) = 5 because gcd(n^3 + 1, (n + 1)^3 + 1) reaches its
maximum value of 7 for n = 5, and is smaller for 0 ≤ n < 5.
Let H(m, n) = Σ G(a, b) for 1 ≤ a ≤ m, 1 ≤ b ≤ n.
You are given H(5, 5) = 128878 and H(10, 10) = 32936544.
Find H(18, 1900).
"""
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/pymdownx/smartsymbols.py
|
Python
|
mit
| 5,483
| 0.002371
|
"""
Smart Symbols.
pymdownx.smartsymbols
Really simple plugin to add support for:
copyright, trademark, and registered symbols
plus/minus, not equal, arrows via:
copyright = `(c)`
trademark = `(tm)`
registered = `(r)`
plus/minus = `+/-`
care/of = `c/o`
fractions = `1/2` etc.
(only certain available unicode fractions)
arrows:
left = `<--`
right = `-->`
both = `<-->`
not equal = `=/=`
(maybe this could be =/= in the future as this might be more
intuitive to non-programmers)
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from ..markdown import Extension
from ..markdown import treeprocessors
from ..markdown.util import Registry
from ..markdown.inlinepatterns import HtmlInlineProcessor
RE_TRADE = ("smart-trademark", r'\(tm\)', r'™')
RE_COPY = ("smart-copyright", r'\(c\)', r'©')
RE_REG = ("smart-registered", r'\(r\)', r'®')
RE_PLUSMINUS = ("smart-plus-minus", r'\+/-', r'±')
RE_NOT_EQUAL = ("smart-not-equal", r'=/=', r'≠')
RE_CARE_OF = ("smart-care-of", r'\bc/o\b', r'℅')
RE_ORDINAL_NUMBERS = (
"smart-ordinal-numbers",
r'''(?x)
\b
(?P<leading>(?:[1-9][0-9]*)?)
    (?P<tail>(?<=1)(?:1|2|3)th|1st|2nd|3rd|[04-9]th)
\b
''',
lambda m: '%s%s<sup>%s</sup>' % (
        m.group('leading') if m.group('leading') else '',
        m.group('tail')[:-2], m.group('tail')[1:]
)
)
RE_ARROWS = (
"smart-arrows",
r'(?P<arrows>\<-{2}\>|(?<!-)-{2}\>|\<-{2}(?!-))',
lambda m: ARR[m.group('arrows')]
)
RE_FRACTIONS = (
"smart-fractions",
r'(?<!\d)(?P<fractions>1/4|1/2|3/4|1/3|2/3|1/5|2/5|3/5|4/5|1/6|5/6|1/8|3/8|5/8|7/8)(?!\d)',
lambda m: FRAC[m.group('fractions')]
)
REPL = {
'trademark': RE_TRADE,
'copyright': RE_COPY,
'registered': RE_REG,
'plusminus': RE_PLUSMINUS,
'arrows': RE_ARROWS,
'notequal': RE_NOT_EQUAL,
'fractions': RE_FRACTIONS,
'ordinal_numbers': RE_ORDINAL_NUMBERS,
'care_of': RE_CARE_OF
}
FRAC = {
"1/4": "¼",
"1/2": "½",
"3/4": "¾",
"1/3": "⅓",
"2/3": "⅔",
"1/5": "⅕",
"2/5": "⅖",
"3/5": "⅗",
"4/5": "⅘",
"1/6": "⅙",
"5/6": "⅚",
"1/8": "⅛",
"3/8": "⅜",
"5/8": "⅝",
"7/8": "⅞"
}
ARR = {
'-->': "→",
'<--': "←",
'<-->': "↔"
}
class SmartSymbolsPattern(HtmlInlineProcessor):
"""Smart symbols patterns handler."""
def __init__(self, pattern, replace, md):
"""Setup replace pattern."""
super(SmartSymbolsPattern, self).__init__(pattern, md)
self.replace = replace
def handleMatch(self, m, data):
"""Replace symbol."""
return self.md.htmlStash.store(
m.expand(self.replace(m) if callable(self.replace) else self.replace),
), m.start(0), m.end(0)
class SmartSymbolsExtension(Extension):
"""Smart Symbols extension."""
def __init__(self, *args, **kwargs):
"""Setup config of which symbols are enabled."""
self.config = {
'trademark': [True, 'Trademark'],
'copyright': [True, 'Copyright'],
'registered': [True, 'Registered'],
'plusminus': [True, 'Plus/Minus'],
'arrows': [True, 'Arrows'],
'notequal': [True, 'Not Equal'],
'fractions': [True, 'Fractions'],
'ordinal_numbers': [True, 'Ordinal Numbers'],
'care_of': [True, 'Care/of']
}
super(SmartSymbolsExtension, self).__init__(*args, **kwargs)
def add_pattern(self, patterns, md):
"""Construct the inline symbol pattern."""
self.patterns.register(SmartSymbolsPattern(patterns[1], patterns[2], md), patterns[0], 30)
def extendMarkdown(self, md):
"""Create a dict of inline replace patterns and add to the tree processor."""
configs = self.getConfigs()
self.patterns = Registry()
for k, v in REPL.items():
if configs[k]:
self.add_pattern(v, md)
inline_processor = treeprocessors.InlineProcessor(md)
inline_processor.inlinePatterns = self.patterns
md.treeprocessors.register(inline_processor, "smart-symbols", 2.1)
def makeExtension(*args, **kwargs):
"""Return extension."""
return SmartSymbolsExtension(*args, **kwargs)
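

# Illustrative usage sketch (not part of this vendored module, shown only as a hedged
# example): with the standalone Python-Markdown and pymdownx packages installed, the
# extension is usually enabled by its registered name when building the converter.
if __name__ == "__main__":
    import markdown  # assumes the standalone `markdown` package is importable

    html = markdown.markdown(
        "(c) 2017 Isaac Muse (tm): accuracy is +/- 1/2 a unit --> see the docs",
        extensions=["pymdownx.smartsymbols"],
    )
    print(html)  # '(c)', '(tm)', '+/-', '1/2' and '-->' come back as their unicode symbols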
|
pratapvardhan/pandas
|
pandas/tests/scalar/period/test_asfreq.py
|
Python
|
bsd-3-clause
| 36,821
| 0
|
import pytest
from pandas.errors import OutOfBoundsDatetime
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas._libs.tslibs.frequencies import _period_code_map
class TestFreqConversion(object):
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize('freq', ['A', 'Q', 'M', 'W', 'B', 'D'])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period('0001-01-01', freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert (per - 1).ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period('0001-01-01', 'D') + 6
per2 = Period('0001-01-01', 'D') - 6
week1 = per1.asfreq('W')
week2 = per2.asfreq('W')
assert week1 != week2
assert week1.asfreq('D', 'E') >= per1
assert week2.asfreq('D', 'S') <= per2
@pytest.mark.xfail(reason='GH#19643 period_helper asfreq functions fail '
'to check for overflows')
def test_to_timestamp_out_of_bounds(self):
# GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848')
per = Period('0001-01-01', freq='B')
with pytest.raises(OutOfBoundsDatetime):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
assert result1.ordinal == expected.ordinal
assert result1.freqstr == '5T'
assert result2.ordinal == expected.ordinal
assert result2.freqstr == 'T'
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert ival_A.asfreq('Q', 'S') == ival_A_to_Q_start
assert ival_A.asfreq('Q', 'e') == ival_A_to_Q_end
assert ival_A.asfreq('M', 's') == ival_A_to_M_start
assert ival_A.asfreq('M', 'E') == ival_A_to_M_end
assert ival_A.asfreq('W', 'S') == ival_A_to_W_start
assert ival_A.asfreq('W', 'E') == ival_A_to_W_end
assert ival_A.asfreq('B', 'S') == ival_A_to_B_start
assert ival_A.asfreq('B', 'E') == ival_A_to_B_end
assert ival_A.asfreq('D', 'S') == ival_A_to_D_start
assert ival_A.asfreq('D', 'E') == ival_A_to_D_end
assert ival_A.asfreq('H', 'S') == ival_A_to_H_start
assert ival_A.asfreq('H', 'E') == ival_A_to_H_end
assert ival_A.asfreq('min', 'S') == ival_A_to_T_start
assert ival_A.asfreq('min', 'E') == ival_A_to_T_end
assert ival_A.asfreq('T', 'S') == ival_A_to_T_start
assert ival_A.asfreq('T', 'E') == ival_A_to_T_end
assert ival_A.asfreq('S', 'S') == ival_A_to_S_start
assert ival_A.asfreq('S', 'E') == ival_A_to_S_end
assert ival_AJAN.asfreq('D', 'S') == ival_AJAN_to_D_start
assert ival_AJAN.asfreq('D', 'E') == ival_AJAN_to_D_end
assert ival_AJUN.asfreq('D', 'S') == ival_AJUN_to_D_start
assert ival_AJUN.asfreq('D', 'E') == ival_AJUN_to_D_end
assert ival_ANOV.asfreq('D', 'S') == ival_ANOV_to_D_start
assert ival_ANOV.asfreq('D', 'E') == ival_ANOV_to_D_end
assert ival_A.asfreq('A') == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
|
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
        ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert ival_Q.asfreq('A') == ival_Q_to_A
assert ival_Q_end_of_year.asfreq('A') == ival_Q_to_A
assert ival_Q.asfreq('M', 'S') == ival_Q_to_M_start
assert ival_Q.asfreq('M', 'E') == ival_Q_to_M_end
assert ival_Q.asfreq('W', 'S') == ival_Q_to_W_start
assert ival_Q.asfreq('W', 'E') == ival_Q_to_W_end
assert ival_Q.asfreq('B', 'S') == ival_Q_to_B_start
assert ival_Q.asfreq('B', 'E') == ival_Q_to_B_end
assert ival_Q.asfreq('D', 'S') == ival_Q_to_D_start
assert ival_Q.asfreq('D', 'E') == ival_Q_to_D_end
assert ival_Q.asfreq('H', 'S') == ival_Q_to_H_start
assert ival_Q.asfreq('H', 'E') == ival_Q_to_H_end
assert ival_Q.asfreq('Min', 'S') == ival_Q_to_T_start
|
trendelkampschroer/msmtools
|
msmtools/analysis/dense/correlations.py
|
Python
|
lgpl-3.0
| 10,071
| 0.001986
|
# This file is part of MSMTools.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# MSMTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 29.11.2013
.. moduleauthor:: marscher, noe
'''
from __future__ import absolute_import
import numpy as np
from .decomposition import rdl_decomposition
from six.moves import range
def time_correlation_by_diagonalization(P, pi, obs1, obs2=None, time=1, rdl=None):
"""
calculates time correlation. Raises P to power 'times' by diagonalization.
If rdl tuple (R, D, L) is given, it will be used for
further calculation.
"""
if rdl is None:
raise ValueError("no rdl decomposition")
R, D, L = rdl
d_times = np.diag(D) ** time
diag_inds = np.diag_indices_from(D)
D_time = np.zeros(D.shape, dtype=d_times.dtype)
D_time[diag_inds] = d_times
P_time = np.dot(np.dot(R, D_time), L)
# multiply element-wise obs1 and pi. this is obs1' diag(pi)
l = np.multiply(obs1, pi)
m = np.dot(P_time, obs2)
result = np.dot(l, m)
return result
def time_correlation_direct_by_mtx_vec_prod(P, mu, obs1, obs2=None, time=1, start_values=None, return_P_k_obs=False):
r"""Compute time-correlation of obs1, or time-cross-correlation with obs2.
The time-correlation at time=k is computed by the matrix-vector expression:
cor(k) = obs1' diag(pi) P^k obs2
Parameters
----------
P : ndarray, shape=(n, n) or scipy.sparse matrix
Transition matrix
obs1 : ndarray, shape=(n)
Vector representing observable 1 on discrete states
obs2 : ndarray, shape=(n)
Vector representing observable 2 on discrete states. If not given,
the autocorrelation of obs1 will be computed
mu : ndarray, shape=(n)
stationary distribution vector.
time : int
time point at which the (auto)correlation will be evaluated.
start_values : (time, ndarray <P, <P, obs2>>_t)
start iteration of calculation of matrix power product, with this values.
only useful when calling this function out of a loop over times.
return_P_k_obs : bool
if True, the dot product <P^time, obs2> will be returned for further
calculations.
Returns
-------
cor(k) : float
correlation between observations
"""
# input checks
if not (type(time) == int):
if not (type(time) == np.int64):
raise TypeError("given time (%s) is not an integer, but has type: %s"
% (str(time), type(time)))
if obs1.shape[0] != P.shape[0]:
raise ValueError("observable shape not compatible with given matrix")
if obs2 is None:
obs2 = obs1
# multiply element-wise obs1 and pi. this is obs1' diag(pi)
l = np.multiply(obs1, mu)
# raise transition matrix to power of time by substituting dot product
# <Pk, obs2> with something like <P, <P, obs2>>.
# This saves a lot of matrix matrix multiplications.
if start_values: # begin with a previous calculated val
P_i_obs = start_values[1]
# calculate difference properly!
time_prev = start_values[0]
t_diff = time - time_prev
r = range(t_diff)
else:
if time >= 2:
P_i_obs = np.dot(P, np.dot(P, obs2)) # vector <P, <P, obs2> := P^2 * obs
r = range(time - 2)
elif time == 1:
P_i_obs = np.dot(P, obs2) # P^1 = P*obs
r = range(0)
elif time == 0: # P^0 = I => I*obs2 = obs2
P_i_obs = obs2
r = range(0)
    for k in r:  # apply the remaining matrix-vector products (the first steps were handled above)
P_i_obs = np.dot(P, P_i_obs)
corr = np.dot(l, P_i_obs)
if return_P_k_obs:
return corr, (time, P_i_obs)
else:
return corr
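# Hedged usage sketch (not part of the original module; numbers are made up):
#   P = np.array([[0.9, 0.1], [0.2, 0.8]])
#   mu = np.array([2. / 3., 1. / 3.])            # stationary distribution of P
#   obs = np.array([1., 0.])
#   c3 = time_correlation_direct_by_mtx_vec_prod(P, mu, obs, time=3)
#   # the same quantity evaluated directly as obs' diag(mu) P^3 obs:
#   c3_ref = np.dot(obs * mu, np.dot(np.linalg.matrix_power(P, 3), obs))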
def time_correlations_direct(P, pi, obs1, obs2=None, times=[1]):
r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.
The time-correlation at time=k is computed by the matrix-vector expression:
cor(k) = obs1' diag(pi) P^k obs2
Parameters
----------
P : ndarray, shape=(n, n) or scipy.sparse matrix
Transition matrix
obs1 : ndarray, shape=(n)
Vector representing observable 1 on discrete states
obs2 : ndarray, shape=(n)
        Vector representing observable 2 on discrete states. If not given,
the autocorrelation of obs1 will be computed
pi : ndarray, shape=(n)
        stationary distribution vector. Will be computed if not given
times : array-like, shape(n_t)
Vector of time points at which the (auto)correlation will be evaluated
    Returns
    -------
    correlations : ndarray, shape(n_t)
        Correlation values evaluated at the given time points
    """
n_t = len(times)
times = np.sort(times) # sort it to use caching of previously computed correlations
f = np.zeros(n_t)
# maximum time > number of rows?
    use_diagonalization = times[-1] > P.shape[0]
    if use_diagonalization:
R, D, L = rdl_decomposition(P)
# discard imaginary part, if all elements i=0
if not np.any(np.iscomplex(R)):
R = np.real(R)
if not np.any(np.iscomplex(D)):
D = np.real(D)
if not np.any(np.iscomplex(L)):
L = np.real(L)
rdl = (R, D, L)
if use_diagonalization:
for i in range(n_t):
f[i] = time_correlation_by_diagonalization(P, pi, obs1, obs2, times[i], rdl)
else:
start_values = None
for i in range(n_t):
f[i], start_values = \
time_correlation_direct_by_mtx_vec_prod(P, pi, obs1, obs2,
times[i], start_values, True)
return f
def time_relaxation_direct_by_mtx_vec_prod(P, p0, obs, time=1, start_values=None, return_pP_k=False):
r"""Compute time-relaxations of obs with respect of given initial distribution.
relaxation(k) = p0 P^k obs
Parameters
----------
P : ndarray, shape=(n, n) or scipy.sparse matrix
Transition matrix
p0 : ndarray, shape=(n)
initial distribution
obs : ndarray, shape=(n)
Vector representing observable on discrete states.
time : int or array like
time point at which the (auto)correlation will be evaluated.
    start_values : (time, ndarray), optional
        start the iteration of the propagation with these values; only useful
        when calling this function inside a loop over times.
Returns
-------
relaxation : float
"""
# input checks
if not type(time) == int:
if not type(time) == np.int64:
raise TypeError("given time (%s) is not an integer, but has type: %s"
% (str(time), type(time)))
if obs.shape[0] != P.shape[0]:
raise ValueError("observable shape not compatible with given matrix")
if p0.shape[0] != P.shape[0]:
raise ValueError("shape of init dist p0 (%s) not compatible with given matrix (shape=%s)"
% (p0.shape[0], P.shape))
# propagate in time
if start_values: # begin with a previous calculated val
pk_i = start_values[1]
time_prev = start_values[0]
t_diff = time - time_prev
r = range(t_diff)
else:
if time >= 2:
pk_i = np.dot(np.dot(p0, P), P) # pk_2
r = range(time - 2)
elif time == 1:
pk_i = np.dot(p0, P) # propagate once
r = range(0)
elif time == 0: # P^0 = I => p0*I = p0
pk_i = p0
r = range(0)
for k in r: # perform the rest of the propagations p0 P^t_diff
pk_i = np.dot(pk_i, P)
# result
l = np.dot(pk_i, obs)
if return_pP_k:
return l, (time, pk_i)
else:
        return l
|
UKPLab/sentence-transformers
|
examples/training/quora_duplicate_questions/training_multi-task-learning.py
|
Python
|
apache-2.0
| 9,356
| 0.007482
|
"""
This script combines training_OnlineContrastiveLoss.py with training_MultipleNegativesRankingLoss.py
Online contrastive loss works well for classification (are question1 and question2 duplicates?), but it
performs less well for duplicate questions mining. MultipleNegativesRankingLoss works well for duplicate
questions mining, but it has some issues with classification as it does not push dissimilar pairs away.
This script combines both losses to get the best of both worlds.
Multi task learning is achieved quite easily by calling the model.fit method like this:
model.fit(train_objectives=[(train_dataloader_MultipleNegativesRankingLoss, train_loss_MultipleNegativesRankingLoss), (train_dataloader_ConstrativeLoss, train_loss_ConstrativeLoss)] ...)
"""
from torch.utils.data import DataLoader
from sentence_transformers import losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import csv
import os
from zipfile import ZipFile
import random
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
#### /print debug information to stdout
#As base model, we use DistilBERT-base that was pre-trained on NLI and STSb data
model = SentenceTransformer('stsb-distilbert-base')
#Training for multiple epochs can be beneficial, as in each epoch a mini-batch is sampled differently
#hence, we get different negatives for each positive
num_epochs = 10
#Increasing the batch size improves the performance for MultipleNegativesRankingLoss. Choose it as large as possible
#I achieved good results with a batch size of 300-350 (requires about 30 GB of GPU memory)
train_batch_size = 64
#As distance metric, we use cosine distance (cosine_distance = 1-cosine_similarity)
distance_metric = losses.SiameseDistanceMetric.COSINE_DISTANCE
#Negative pairs should have a distance of at least 0.5
margin = 0.5
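# Illustrative sketch (not part of the original script): the distance used by
# OnlineContrastiveLoss below is cosine distance, i.e. 1 - cosine similarity.
# With two hypothetical embedding vectors u and v:
#   import torch
#   u, v = torch.randn(768), torch.randn(768)
#   dist = 1 - torch.nn.functional.cosine_similarity(u, v, dim=0)
# Non-duplicate pairs are pushed apart until dist >= margin (0.5 here).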
dataset_path = 'quora-IR-dataset'
model_save_path = 'output/training_multi-task-learning'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.makedirs(model_save_path, exist_ok=True)
# Check if the dataset exists. If not, download and extract
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = 'quora-IR-dataset.zip'
util.http_get(url='https://sbert.net/datasets/quora-IR-dataset.zip', path=zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(dataset_path)
######### Read train data ##########
train_samples_MultipleNegativesRankingLoss = []
train_samples_ConstrativeLoss = []
with open(os.path.join(dataset_path, "classification/train_pairs.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
train_samples_ConstrativeLoss.append(InputExample(texts=[row['question1'], row['question2']], label=int(row['is_duplicate'])))
if row['is_duplicate'] == '1':
train_samples_MultipleNegativesRankingLoss.append(InputExample(texts=[row['question1'], row['question2']], label=1))
            train_samples_MultipleNegativesRankingLoss.append(InputExample(texts=[row['question2'], row['question1']], label=1))  # if A is a duplicate of B, then B is a duplicate of A
# Create data loader and loss for MultipleNegativesRankingLoss
train_dataloader_MultipleNegativesRankingLoss = DataLoader(train_samples_MultipleNegativesRankingLoss, shuffle=True, batch_size=train_batch_size)
train_loss_MultipleNegativesRankingLoss = losses.MultipleNegativesRankingLoss(model)
# Create data loader and loss for OnlineContrastiveLoss
train_dataloader_ConstrativeLoss = DataLoader(train_samples_ConstrativeLoss, shuffle=True, batch_size=train_batch_size)
train_loss_ConstrativeLoss = losses.OnlineContrastiveLoss(model=model, distance_metric=distance_metric, margin=margin)
################### Development Evaluators ##################
# We add 3 evaluators, that evaluate the model on Duplicate Questions pair classification,
# Duplicate Questions Mining, and Duplicate Questions Information Retrieval
evaluators = []
###### Classification ######
# Given (question1, question2), is this a duplicate or not?
# The evaluator will compute the embeddings for both questions and then compute
# a cosine similarity. If the similarity is above a threshold, we have a duplicate.
dev_sentences1 = []
dev_sentences2 = []
dev_labels = []
with open(os.path.join(dataset_path, "classification/dev_pairs.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_sentences1.append(row['question1'])
dev_sentences2.append(row['question2'])
dev_labels.append(int(row['is_duplicate']))
binary_acc_evaluator = evaluation.BinaryClassificationEvaluator(dev_sentences1, dev_sentences2, dev_labels)
evaluators.append(binary_acc_evaluator)
###### Duplicate Questions Mining ######
# Given a large corpus of questions, identify all duplicates in that corpus.
# For faster processing, we limit the development corpus to only 10,000 sentences.
max_dev_samples = 10000
dev_sentences = {}
dev_duplicates = []
with open(os.path.join(dataset_path, "duplicate-mining/dev_corpus.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_sentences[row['qid']] = row['question']
if len(dev_sentences) >= max_dev_samples:
break
with open(os.path.join(dataset_path, "duplicate-mining/dev_duplicates.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['qid1'] in dev_sentences and row['qid2'] in dev_sentences:
dev_duplicates.append([row['qid1'], row['qid2']])
# The ParaphraseMiningEvaluator computes the cosine similarity between all sentences and
# extracts a list with the pairs that have the highest similarity. Given the duplicate
# information in dev_duplicates, it then computes an F1 score measuring how well our duplicate mining worked
paraphrase_mining_evaluator = evaluation.ParaphraseMiningEvaluator(dev_sentences, dev_duplicates, name='dev')
evaluators.append(paraphrase_mining_evaluator)
###### Duplicate Questions Information Retrieval ######
# Given a question and a large corpus of thousands questions, find the most relevant (i.e. duplicate) question
# in that corpus.
# For faster processing, we limit the development corpus to only 100,000 sentences.
max_corpus_size = 100000
ir_queries = {} #Our queries (qid => question)
ir_needed_qids = set() #QIDs we need in the corpus
ir_corpus = {} #Our corpus (qid => question)
ir_relevant_docs = {} #Mapping of relevant documents for a given query (qid => set([relevant_question_ids])
with open(os.path.join(dataset_path, 'information-retrieval/dev-queries.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, query, duplicate_ids = line.strip().split('\t')
duplicate_ids = duplicate_ids.split(',')
ir_queries[qid] = query
ir_relevant_docs[qid] = set(duplicate_ids)
for qid in duplicate_ids:
ir_needed_qids.add(qid)
# First get all needed relevant documents (i.e., we must ensure that the relevant questions are actually in the corpus)
distraction_questions = {}
with open(os.path.join(dataset_path, 'information-retrieval/corpus.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, question = line.strip().split('\t')
if qid in ir_needed_qids:
ir_corpus[qid] = question
else:
distraction_questions[qid] = question
# Now, also add some irrelevant questions to fill our corpus
other_qid_list = list(distraction_questions.keys())
random.shuffle(other_qid_list)
for qid in other_qid_list[0:max(0, max_corpus_size - len(ir_corpus))]:
    ir_corpus[qid] = distraction_questions[qid]
|
mtury/scapy
|
scapy/layers/tls/handshake.py
|
Python
|
gpl-2.0
| 61,432
| 0
|
# This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS handshake fields & logic.
This module covers the handshake TLS subprotocol, except for the key exchange
mechanisms which are addressed with keyexchange.py.
"""
from __future__ import absolute_import
import math
import struct
from scapy.error import log_runtime, warning
from scapy.fields import ByteEnumField, ByteField, EnumField, Field, \
FieldLenField, IntField, PacketField, PacketListField, ShortField, \
StrFixedLenField, StrLenField, ThreeBytesField, UTCTimeField
from scapy.compat import hex_bytes, orb, raw
from scapy.config import conf
from scapy.modules import six
from scapy.packet import Packet, Raw, Padding
from scapy.utils import randstring, repr_hex
from scapy.layers.x509 import OCSP_Response
from scapy.layers.tls.cert import Cert
from scapy.layers.tls.basefields import (_tls_version, _TLSVersionField,
_TLSClientVersionField)
from scapy.layers.tls.extensions import (_ExtensionsLenField, _ExtensionsField,
_cert_status_type, TLS_Ext_SupportedVersion_CH, # noqa: E501
TLS_Ext_SignatureAlgorithms,
TLS_Ext_SupportedVersion_SH,
TLS_Ext_EarlyDataIndication)
from scapy.layers.tls.keyexchange import (_TLSSignature, _TLSServerParamsField,
_TLSSignatureField, ServerRSAParams,
SigAndHashAlgsField, _tls_hash_sig,
SigAndHashAlgsLenField)
from scapy.layers.tls.session import (_GenericTLSSessionInheritance,
readConnState, writeConnState)
from scapy.layers.tls.crypto.compression import (_tls_compression_algs,
_tls_compression_algs_cls,
Comp_NULL, _GenericComp,
_GenericCompMetaclass)
from scapy.layers.tls.crypto.suites import (_tls_cipher_suites,
_tls_cipher_suites_cls,
_GenericCipherSuite,
_GenericCipherSuiteMetaclass)
###############################################################################
# Generic TLS Handshake message #
###############################################################################
_tls_handshake_type = {0: "hello_request", 1: "client_hello",
2: "server_hello", 3: "hello_verify_request",
4: "session_ticket", 6: "hello_retry_request",
8: "encrypted_extensions", 11: "certificate",
12: "server_key_exchange", 13: "certificate_request",
14: "server_hello_done", 15: "certificate_verify",
16: "client_key_exchange", 20: "finished",
21: "certificate_url", 22: "certificate_status",
23: "supplemental_data", 24: "key_update"}
class _TLSHandshake(_GenericTLSSessionInheritance):
"""
Inherited by other Handshake classes to get post_build().
Also used as a fallback for unknown TLS Handshake packets.
"""
name = "TLS Handshake Generic message"
fields_desc = [ByteEnumField("msgtype", None, _tls_handshake_type),
ThreeBytesField("msglen", None),
StrLenField("msg", "",
length_from=lambda pkt: pkt.msglen)]
def post_build(self, p, pay):
tmp_len = len(p)
if self.msglen is None:
l2 = tmp_len - 4
p = struct.pack("!I", (orb(p[0]) << 24) | l2) + p[4:]
return p + pay
def guess_payload_class(self, p):
return conf.padding_layer
def tls_session_update(self, msg_str):
"""
Covers both post_build- and post_dissection- context updates.
"""
self.tls_session.handshake_messages.append(msg_str)
self.tls_session.handshake_messages_parsed.append(self)
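# Illustrative note (not part of the original module): post_build() above
# writes a 1-byte msgtype followed by a 3-byte big-endian length. For a
# hypothetical client_hello (msgtype 1) with a 5-byte body:
#   struct.pack("!I", (1 << 24) | 5) == b'\x01\x00\x00\x05'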
###############################################################################
# HelloRequest #
###############################################################################
class TLSHelloRequest(_TLSHandshake):
name = "TLS Handshake - Hello Request"
fields_desc = [ByteEnumField("msgtype", 0, _tls_handshake_type),
ThreeBytesField("msglen", None)]
def tls_session_update(self, msg_str):
"""
Message should not be added to the list of handshake messages
that will be hashed in the finished and certificate verify messages.
"""
return
###############################################################################
# ClientHello fields #
###############################################################################
class _GMTUnixTimeField(UTCTimeField):
"""
"The current time and date in standard UNIX 32-bit format (seconds since
the midnight starting Jan 1, 1970, GMT, ignoring leap seconds)."
"""
def i2h(self, pkt, x):
if x is not None:
return x
return 0
class _TLSRandomBytesField(StrFixedLenField):
def i2repr(self, pkt, x):
if x is None:
return repr(x)
return repr_hex(self.i2h(pkt, x))
class _SessionIDField(StrLenField):
"""
opaque SessionID<0..32>; section 7.4.1.2 of RFC 4346
"""
pass
class _CipherSuitesField(StrLenField):
__slots__ = ["itemfmt", "itemsize", "i2s", "s2i"]
islist = 1
def __init__(self, name, default, dico, length_from=None, itemfmt="!H"):
StrLenField.__init__(self, name, default, length_from=length_from)
self.itemfmt = itemfmt
self.itemsize = struct.calcsize(itemfmt)
i2s = self.i2s = {}
s2i = self.s2i = {}
for k in six.iterkeys(dico):
i2s[k] = dico[k]
s2i[dico[k]] = k
def any2i_one(self, pkt, x):
if (isinstance(x, _GenericCipherSuite) or
isinstance(x, _GenericCipherSuiteMetaclass)):
x = x.val
if isinstance(x, bytes):
x = self.s2i[x]
return x
def i2repr_one(self, pkt, x):
fmt = "0x%%0%dx" % self.itemsize
return self.i2s.get(x, fmt % x)
def any2i(self, pkt, x):
if x is None:
return None
if not isinstance(x, list):
x = [x]
return [self.any2i_one(pkt, z) for z in x]
def i2repr(self, pkt, x):
if x is None:
return "None"
tmp_len = [self.i2repr_one(pkt, z) for z in x]
if len(tmp_len) == 1:
tmp_len = tmp_len[0]
else:
tmp_len = "[%s]" % ", ".join(tmp_len)
return tmp_len
def i2m(self, pkt, val):
if val is None:
val = []
return b"".join(struct.pack(self.itemfmt, x) for x in val)
def m2i(self, pkt, m):
res = []
itemlen = struct.calcsize(self.itemfmt)
while m:
res.append(struct.unpack(self.itemfmt, m[:itemlen])[0])
m = m[itemlen:]
return res
def i2len(self, pkt, i):
if i is None:
return 0
return len(i) * self.itemsize
class _CompressionMethodsField(_CipherSuitesField):
def any2i_one(self, pkt, x):
if (isinstance(x, _GenericComp) or
isinstance(x, _GenericCompMetaclass)):
x = x.val
if isinstance(x, str):
x = self.s2i[x]
return x
###############################################################################
# ClientHello #
##################################################################
|
AXAz0r/apex-sigma-core
|
sigma/modules/help/donate.py
|
Python
|
gpl-3.0
| 1,698
| 0.0053
|
import discord
async def donate(cmd, message, args):
if args:
if args[0] == 'mini':
mini = True
else:
mini = False
else:
mini = False
sigma_image = 'https://i.imgur.com/mGyqMe1.png'
sigma_title = 'Sigma Donation Information'
    patreon_url = 'https://www.patreon.com/ApexSigma'
paypal_url = 'https://www.paypal.me/AleksaRadovic'
support_url = 'https://discordapp.com/invite/aEUCHwX'
if mini:
response = discord.Embed(color=0x1B6F5F, title=sigma_title)
donation_text = f'Care to help out? Come support Sigma on [Patreon]({patreon_url})!'
response.description = donation_text
else:
response = discord.Embed(color=0x1B6F5F)
donation_text = 'If you could spare some money, it would be amazing of you to support my work. '
donation_text += 'At the moment support from Sigma\'s users is my only source of income. '
donation_text += f'Come check out my [Patreon]({patreon_url}) and lend a hand! You also get some goodies! '
donation_text += f'Or if a subscription is too much commitment for you, how about [PayPal]({paypal_url})? '
donation_text += f'If you do end up being one of the lovely people to give support, '
donation_text += f'drop by our [Server]({support_url}) so we can properly thank you.'
donation_text += f'\n**Thank you to the {len(cmd.bot.info.donors.donors)} donors who have provided support!**'
response.set_author(name=sigma_title, icon_url=sigma_image, url=cmd.bot.cfg.pref.website)
response.add_field(name='Care to help out?', value=donation_text)
await message.channel.send(embed=response)
|
SpazioDati/python-dandelion-eu
|
tests/base.py
|
Python
|
gpl-2.0
| 4,228
| 0
|
""" tests can be run from the root dir with:
clean-pyc && \
APP_ID= APP_KEY= coverage run --source=. --branch `which nosetests` tests/* &&\
coverage html
"""
import os
from unittest import TestCase
from mock import patch
from dandelion import Datagem, DandelionException, DataTXT, default_config
from dandelion.base import BaseDandelionRequest
from dandelion.utils import AttributeDict
class TestDefaultConfiguration(TestCase):
def tearDown(self):
# cleanup default config
for key in ['app_id', 'app_key']:
if key in default_config:
del default_config[key]
def test_can_set_app_id(self):
default_config['app_id'] = os.environ['APP_ID']
with self.assertRaises(DandelionException) as context:
Datagem('administrative-regions')
self.assertEqual(
context.exception.message, 'Param "app_key" is required'
)
def test_can_set_app_key(self):
default_config['app_key'] = os.environ['APP_KEY']
with self.assertRaises(DandelionException) as context:
Datagem('administrative-regions')
self.assertEqual(
context.exception.message, 'Param "app_id" is required'
)
def test_can_authenticate(self):
with self.assertRaises(DandelionException) as context:
Datagem('administrative-regions')
self.assertEqual(
context.exception.message, 'Param "token" is required'
)
with self.assertRaises(DandelionException) as context:
DataTXT()
self.assertEqual(
context.exception.message, 'Param "token" is required'
)
default_config['app_id'] = os.environ['APP_ID']
default_config['app_key'] = os.environ['APP_KEY']
Datagem('administrative-regions')
DataTXT()
def test_cannot_set_other_params(self):
with self.assertRaises(DandelionException) as context:
default_config['foo'] = 42
self.assertEqual(
context.exception.message, "invalid config param: foo"
)
class TestAttributeDict(TestCase):
def test_simple(self):
obj = AttributeDict()
obj.name = 'foo'
self.assertEqual(obj.name, 'foo')
del obj.name
with self.assertRaises(KeyError):
print(obj.name)
class TestBaseClass(TestCase):
    @staticmethod
def _make_class(require_auth=True, implement_abstract=False):
        class TestClass(BaseDandelionRequest):
REQUIRE_AUTH = require_auth
def _get_uri_tokens(self):
if implement_abstract:
return ['']
return super(TestClass, self)._get_uri_tokens()
return TestClass
def test_abstract_methods(self):
with self.assertRaises(NotImplementedError):
self._make_class(require_auth=False)()
def test_authentication_required(self):
with self.assertRaises(DandelionException) as context:
self._make_class(require_auth=True, implement_abstract=True)()
self.assertEqual(
context.exception.message, 'Param "token" is required'
)
obj = self._make_class(require_auth=True, implement_abstract=True)(
app_id='aa', app_key='bb'
)
with patch.object(obj, '_do_raw_request') as _do_raw_request:
_do_raw_request.return_value.ok = True
_do_raw_request.return_value.content = '{}'
obj.do_request(params=dict(foo='bar'))
_do_raw_request.assert_called_once_with(
'https://api.dandelion.eu',
{'foo': 'bar', '$app_id': 'aa', '$app_key': 'bb'},
'post'
)
def test_authentication_not_required(self):
obj = self._make_class(require_auth=False, implement_abstract=True)()
with patch.object(obj, '_do_raw_request') as _do_raw_request:
_do_raw_request.return_value.ok = True
_do_raw_request.return_value.content = '{}'
obj.do_request(params=dict(foo='bar'))
_do_raw_request.assert_called_once_with(
'https://api.dandelion.eu', dict(foo='bar'), 'post'
)
|
amaozhao/basecms
|
cms/test_utils/project/placeholderapp/models.py
|
Python
|
mit
| 3,065
| 0.001305
|
from cms.utils.urlutils import admin_reverse
from django.core.urlresolvers import reverse
from cms.utils import get_language_from_request
from cms.utils.compat.dj import python_2_unicode_compatible
from django.db import models
from cms.models.fields import PlaceholderField
from hvad.models import TranslatableModel, TranslatedFields
def dynamic_placeholder_1(instance):
return instance.char_1
def dynamic_placeholder_2(instance):
return instance.char_2
@python_2_unicode_compatible
class Example1(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
date_field = models.DateField(null=True)
placeholder = PlaceholderField('placeholder')
static_admin_url = ''
def __init__(self, *args, **kwargs):
super(Example1, self).__init__(*args, **kwargs)
def callable_item(self, request):
return self.char_1
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("example_detail", args=(self.pk,))
def set_static_url(self, request):
language = get_language_from_request(request)
if self.pk:
self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
return self.pk
def dynamic_url(self, request):
language = get_language_from_request(request)
return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
class TwoPlaceholderExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')
placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')
class DynamicPlaceholderSlotExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')
placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')
class CharPksExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
slug = models.SlugField(u'char_1', max_length=255, primary_key=True)
placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1')
@python_2_unicode_compatible
class MultilingualExample1(TranslatableModel):
translations = TranslatedFields(
char_1=models.CharField(u'char_1', max_length=255),
char_2=models.CharField(u'char_2', max_length=255),
)
placeholder_1 = PlaceholderField('placeholder_1')
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("detail_multi", args=(self.pk,))
|
linebp/pandas
|
bench/better_unique.py
|
Python
|
bsd-3-clause
| 2,143
| 0
|
from __future__ import print_function
from pandas import DataFrame
from pandas.compat import range, zip
import timeit
setup = """
from pandas import Series
import pandas._tseries as _tseries
from pandas.compat import range
import random
import numpy as np
def better_unique(values):
uniques = _tseries.fast_unique(values)
id_map = _tseries.map_indices_buf(uniques)
labels = _tseries.get_unique_labels(values, id_map)
return uniques, labels
tot = 100000
def get_test_data(ngroups=100, n=tot):
unique_groups = range(ngroups)
random.shuffle(unique_groups)
arr = np.asarray(np.tile(unique_groups, n / ngroups), dtype=object)
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[:n - len(arr)],
dtype=object)
return arr
arr = get_test_data(ngroups=%d)
"""
group_sizes = [10, 100, 1000, 10000,
20000, 30000, 40000,
50000, 60000, 70000,
80000, 90000, 100000]
numbers = [100, 100, 50] + [10] * 10
numpy = []
wes = []
for sz, n in zip(group_sizes, numbers):
# wes_timer = timeit.Timer(stmt='better_unique(arr)',
# setup=setup % sz)
    wes_timer = timeit.Timer(stmt='_tseries.fast_unique(arr)',
setup=setup % sz)
numpy_timer = timeit.Timer(stmt='np.unique(arr)',
setup=setup % sz)
print(n)
    numpy_result = numpy_timer.timeit(number=n) / n
wes_result = wes_timer.timeit(number=n) / n
print('Groups: %d, NumPy: %s, Wes: %s' % (sz, numpy_result, wes_result))
wes.append(wes_result)
numpy.append(numpy_result)
result = DataFrame({'wes': wes, 'numpy': numpy}, index=group_sizes)
def make_plot(numpy, wes):
pass
# def get_test_data(ngroups=100, n=100000):
# unique_groups = range(ngroups)
# random.shuffle(unique_groups)
# arr = np.asarray(np.tile(unique_groups, n / ngroups), dtype=object)
# if len(arr) < n:
# arr = np.asarray(list(arr) + unique_groups[:n - len(arr)],
# dtype=object)
# return arr
# arr = get_test_data(ngroups=1000)
|
google/google-ctf
|
third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/faqwiz/faqconf.py
|
Python
|
apache-2.0
| 15,699
| 0.001784
|
"""FAQ Wizard customization module.
Edit this file to customize the FAQ Wizard. For normal purposes, you
should only have to change the FAQ section titles and the small group
of parameters below it.
"""
# Titles of FAQ sections
SECTION_TITLES = {
# SectionNumber : SectionTitle; need at least one entry
1: "General information and availability",
}
# Parameters you definitely want to change
SHORTNAME = "Generic" # FAQ name with "FAQ" omitted
PASSWORD = "" # Password for editing
OWNERNAME = "FAQ owner" # Name for feedback
OWNEREMAIL = "nobody@anywhere.org" # Email for feedback
HOMEURL = "http://www.python.org" # Related home page
HOMENAME = "Python home" # Name of related home page
RCSBINDIR = "/usr/local/bin/" # Directory containing RCS commands
# (must end in a slash)
# Parameters you can normally leave alone
MAXHITS = 10 # Max #hits to be shown directly
COOKIE_LIFETIME = 28*24*3600 # Cookie expiration in seconds
# (28*24*3600 = 28 days = 4 weeks)
PROCESS_PREFORMAT = 1 # toggle whether preformatted text
# will replace urls and emails with
# HTML links
# Markers appended to titles to indicate recent changes
# (may contain HTML, e.g. <IMG>), and the corresponding time thresholds
MARK_VERY_RECENT = " **" # Changed very recently
MARK_RECENT = " *" # Changed recently
DT_VERY_RECENT = 24*3600 # 24 hours
DT_RECENT = 7*24*3600 # 7 days
EXPLAIN_MARKS = """
<P>(Entries marked with ** were changed within the last 24 hours;
entries marked with * were changed within the last 7 days.)
<P>
"""
# Version -- don't change unless you edit faqwiz.py
WIZVERSION = "1.0.4" # FAQ Wizard version
import os, sys
if os.name in ['nt',]:
# On NT we'll probably be running python from a batch file,
# so sys.argv[0] is not helpful
FAQCGI = 'faq.bat' # Relative URL of the FAQ cgi script
# LOGNAME is not typically set on NT
os.environ[ 'LOGNAME' ] = "FAQWizard"
else:
# This parameter is normally overwritten with a dynamic value
FAQCGI = 'faqw.py' # Relative URL of the FAQ cgi script
FAQCGI = os.path.basename(sys.argv[0]) or FAQCGI
del os, sys
# Perl (re module) style regular expression to recognize FAQ entry
# files: group(1) should be the section number, group(2) should be the
# question number. Both should be fixed width so simple-minded
# sorting yields the right order.
OKFILENAME = r"^faq(\d\d)\.(\d\d\d)\.htp$"
# Format to construct a FAQ entry file name
NEWFILENAME = "faq%02d.%03d.htp"
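# Illustrative note (not part of the original file): the two settings above
# are meant to round-trip, e.g. NEWFILENAME % (1, 2) == "faq01.002.htp",
# which OKFILENAME matches with group(1) == "01" and group(2) == "002".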
# Load local customizations on top of the previous parameters
try:
from faqcust import *
except ImportError:
pass
# Calculated parameter names
COOKIE_NAME = SHORTNAME + "-FAQ-Wizard" # Name used for Netscape cookie
FAQNAME = SHORTNAME + " FAQ" # Name of the FAQ
# ----------------------------------------------------------------------
# Anything below this point normally needn't be changed; you would
# change this if you were to create e.g. a French translation or if
# you just aren't happy with the text generated by the FAQ Wizard.
# Most strings here are subject to substitution (string%dictionary)
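# Illustrative note (not part of the original file): "substitution" means
# old-style %-formatting against a dict of the template's names, e.g.
#   PROLOGUE % {'title': 'My FAQ'}
# produces the HTML header with <TITLE>My FAQ</TITLE> filled in.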
# RCS commands
import os
if os.name in ['nt', ]:
SH_RLOG = RCSBINDIR + "rlog %(file)s < NUL"
SH_RLOG_H = RCSBINDIR + "rlog -h %(file)s < NUL"
SH_RDIFF = RCSBINDIR + "rcsdiff -r%(prev)s -r%(rev)s %(file)s < NUL"
SH_REVISION = RCSBINDIR + "co -p%(rev)s %(file)s < NUL"
### Have to use co -l, or the file is not marked rw on NT
SH_LOCK = RCSBINDIR + "co -l %(file)s < NUL"
SH_CHECKIN = RCSBINDIR + "ci -u %(file)s < %(tfn)s"
else:
SH_RLOG = RCSBINDIR + "rlog %(file)s </dev/null 2>&1"
SH_RLOG_H = RCSBINDIR + "rlog -h %(file)s </dev/null 2>&1"
SH_RDIFF = RCSBINDIR + "rcsdiff -r%(prev)s -r%(rev)s %(file)s </dev/null 2>&1"
SH_REVISION = RCSBINDIR + "co -p%(rev)s %(file)s </dev/null 2>&1"
SH_LOCK = RCSBINDIR + "rcs -l %(file)s </dev/null 2>&1"
SH_CHECKIN = RCSBINDIR + "ci -u %(file)s <%(tfn)s 2>&1"
del os
# Titles for various output pages (not subject to substitution)
T_HOME = FAQNAME + " Wizard " + WIZVERSION
T_ERROR = "Sorry, an error occurred"
T_ROULETTE = FAQNAME + " Roulette"
T_ALL = "The Whole " + FAQNAME
T_INDEX = FAQNAME + " Index"
T_SEARCH = FAQNAME + " Search Results"
T_RECENT = "What's New in the " + FAQNAME
T_SHOW = FAQNAME + " Entry"
T_LOG = "RCS log for %s entry" % FAQNAME
T_REVISION = "RCS revision for %s entry" % FAQNAME
T_DIFF = "RCS diff for %s entry" % FAQNAME
T_ADD = "Add an entry to the " + FAQNAME
T_DELETE = "Deleting an entry from the " + FAQNAME
T_EDIT = FAQNAME + " Edit Wizard"
T_REVIEW = T_EDIT + " - Review Changes"
T_COMMITTED = T_EDIT + " - Changes Committed"
T_COMMITFAILED = T_EDIT + " - Commit Failed"
T_CANTCOMMIT = T_EDIT + " - Commit Rejected"
T_HELP = T_EDIT + " - Help"
# Generic prologue and epilogue
PROLOGUE = '''
<HTML>
<HEAD>
<TITLE>%(title)s</TITLE>
</HEAD>
<BODY
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#AA0000"
VLINK="#906A6A">
<H1>%(title)s</H1>
'''
EPILOGUE = '''
<HR>
<A HREF="%(HOMEURL)s">%(HOMENAME)s</A> /
<A HREF="%(FAQCGI)s?req=home">%(FAQNAME)s Wizard %(WIZVERSION)s</A> /
Feedback to <A HREF="mailto:%(OWNEREMAIL)s">%(OWNERNAME)s</A>
</BODY>
</HTML>
'''
# Home page
HOME = """
<H2>Search the %(FAQNAME)s:</H2>
<BLOCKQUOTE>
<FORM ACTION="%(FAQCGI)s">
<INPUT TYPE=text NAME=query>
<INPUT TYPE=submit VALUE="Search"><BR>
<INPUT TYPE=radio NAME=querytype VALUE=simple CHECKED>
Simple string
/
<INPUT TYPE=radio NAME=querytype VALUE=regex>
Regular expression
/<BR>
<INPUT TYPE=radio NAME=querytype VALUE=anykeywords>
Keywords (any)
/
<INPUT TYPE=radio NAME=querytype VALUE=allkeywords>
Keywords (all)
<BR>
<INPUT TYPE=radio NAME=casefold VALUE=yes CHECKED>
Fold case
/
<INPUT TYPE=radio NAME=casefold VALUE=no>
Case sensitive
<BR>
<INPUT TYPE=hidden NAME=req VALUE=search>
</FORM>
</BLOCKQUOTE>
<HR>
<H2>Other forms of %(FAQNAME)s access:</H2>
<UL>
<LI><A HREF="%(FAQCGI)s?req=index">FAQ index</A>
<LI><A HREF="%(FAQCGI)s?req=all">The whole FAQ</A>
<LI><A HREF="%(FAQCGI)s?req=recent">What's new in the FAQ?</A>
<LI><A HREF="%(FAQCGI)s?req=roulette">FAQ roulette</A>
<LI><A HREF="%(FAQCGI)s?req=add">Add a FAQ entry</A>
<LI><A HREF="%(FAQCGI)s?req=delete">Delete a FAQ entry</A>
</UL>
"""
# Index formatting
INDEX_SECTION = """
<P>
<HR>
<H2>%(sec)s. %(title)s</H2>
<UL>
"""
INDEX_ADDSECTION = """
<P>
<LI><A HREF="%(FAQCGI)s?req=new&section=%(sec)s">Add new entry</A>
(at this point)
"""
INDEX_ENDSECTION = """
</UL>
"""
INDEX_ENTRY = """\
<LI><A HREF="%(FAQCGI)s?req=show&file=%(file)s">%(title)s</A>
"""
LOCAL_ENTRY = """\
<LI><A HREF="#%(sec)s.%(num)s">%(title)s</A>
"""
# Entry formatting
ENTRY_HEADER1 = """
<HR>
<H2><A NAME="%(sec)s.%(num)s">%(title)s</A>\
"""
ENTRY_HEADER2 = """\
</H2>
"""
ENTRY_FOOTER = """
<A HREF="%(FAQCGI)s?req=edit&file=%(file)s">Edit this entry</A> /
<A HREF="%(FAQCGI)s?req=log&file=%(file)s">Log info</A>
"""
ENTRY_LOGINFO = """
/ Last changed on %(last_changed_date)s by
<A HREF="mailto:%(last_changed_email)s">%(last_changed_author)s</A>
"""
# Search
NO_HITS = """
No hits.
"""
ONE_HIT = """
Your search matched the following entry:
"""
FEW_HITS = """
Your search matched the following %(count)s entries:
"""
MANY_HITS = """
Your search matched more than %(MAXHITS)s entries.
The %(count)s matching entries are presented here ordered by section:
|
sasmita/upm
|
examples/python/grovewfs.py
|
Python
|
mit
| 2,489
| 0.002009
|
#!/usr/bin/python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_grovewfs as upmGrovewfs
def main():
# Instantiate a Grove Water Flow Sensor on digital pin D2
myWaterFlow = upmGrovewfs.GroveWFS(2)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit,
# including functions from myWaterFlow
def exitHandler():
myWaterFlow.stopFlowCounter()
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# set the flow counter to 0 and start counting
myWaterFlow.clearFlowCounter()
myWaterFlow.startFlowCounter()
while (1):
# we grab these (millis and flowCount) just for display
# purposes in this example
millis = myWaterFlow.getMillis()
flowCount = myWaterFlow.flowCounter()
fr = myWaterFlow.flowRate()
# output milliseconds passed, flow count, and computed flow rate
outputStr = "Millis: {0} Flow Count: {1} Flow Rate: {2} LPM".format(
millis, flowCount, fr)
print(outputStr)
time.sleep(2)
if __name__ == '__main__':
main()
|
fbuitron/FBMusic_ML_be
|
BATCH/PlaylistAPI.py
|
Python
|
apache-2.0
| 1,427
| 0.006307
|
from Networking import Networking
from Model import Playlist
from SpotifyAPI import SpotifyAPI
import Security
import json
class PlaylistAPI(SpotifyAPI):
base_url = "https://api.spotify.com"
def __init__(self, categoryID):
super(PlaylistAPI, self).__init__()
self.list_of_playlist = []
self.categoryID = categoryID
def getPlaylists(self):
def success(json_str):
super(PlaylistAPI, self).success(json_str)
json_obj = json.loads(json_str)
            list_of_items = json_obj['playlists']['items']
for item_index in range(len(list_of_items)):
playlist_json = json_obj['playlists']['items'][item_index]
p = Playlist.Playlist(playlist_json)
self.list_of_playlist.append(p)
def failure(error):
print(error.content)
self.stillPaging = False
if self.hasPaging():
self.stillPaging = True
i = 0
while(self.stillPaging):
get = Networking.NetworkGET(self.base_url, self.getEndpoint())
params = self.getParameters()
get.get(success, failure, params)
def hasPaging(self):
return True
def getEndpoint(self):
return "/v1/browse/categories/#category#/playlists".replace("#category#",self.categoryID)
def getRootElement(self):
return "playlists"
|
gleseur/room-status
|
detector/daemon.py
|
Python
|
mit
| 1,629
| 0.006139
|
u"""
This is the daemon that must be launched in order to detect motion
and launch signals.
"""
from __future__ import unicode_literals
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
from detection import PirDetector
from busy_processor import RoomBusyStatus
import settings
# Importing motion listeners
from listener.api import ApiListener
from listener.switch import Light
import listener.raspberry_out
TIME_TO_SLEEP = 0.01
def initialize_detection_pairs():
detectors = []
room_statuses = []
    for pair_name, values in settings.DETECTION_PAIRS.iteritems():
print "Initializing pair {}".format(pair_name)
detector = PirDetector(values["pir"], pair_name)
detector.setup()
detectors.append(detector)
# Subscribing listeners
rbs = RoomBusyStatus(pair_name, values["free_time"], values["lock_time"], detector)
room_statuses.append(rbs)
Light(values["light"], pair_name, rbs)
        ApiListener(pair_name, values["room_id"], rbs)
return detectors, room_statuses
def run_daemon():
detectors, room_statuses = initialize_detection_pairs()
while True:
for detector in detectors:
detector.detect_motion()
for room_status in room_statuses:
room_status.check_idle()
time.sleep(TIME_TO_SLEEP)
if __name__ == "__main__":
try:
run_daemon()
except KeyboardInterrupt:
print "Cleaning up GPIO"
GPIO.cleanup()
print "Quitting ..."
except Exception as e:
print "Exception {}, cleaning up GPIO".format(e)
GPIO.cleanup()
raise
|
ehenneken/adsws
|
adsws/tests/test_factory.py
|
Python
|
gpl-2.0
| 1,777
| 0.011255
|
from adsws.testsuite import make_test_suite, \
run_test_suite, AdsWSAppTestCase, FlaskAppTestCase, AdsWSTestCase
import os
import inspect
import tempfile
class FactoryTest(FlaskAppTestCase):
@property
def config(self):
return {
'SQLALCHEMY_DATABASE_URI' : 'sqlite://',
'FOO': 'bar'
}
def test_factory(self):
self.assertEqual(self.app.config.get('FOO'), 'bar', "The app didn't get property: foo")
rootf = os.path.realpath(os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '../../adsws'))
self.assertEqual(self.app.root_path, rootf, "root_path is not correct")
self.assertEqual(self.app.instance_path, os.path.realpath(os.path.join(rootf, '../instance')), "instance_path is not correct")
class FactoryTestCustomInstanceDir(FlaskAppTestCase):
@property
def config(self):
if not self._config.has_key('instance_path'):
instance_path = tempfile.mkdtemp()
            with open(os.path.join(instance_path, 'local_config.py'), 'w') as fo:
fo.write("BAR='baz'\n")
self._config['instance_path'] = instance_path
return self._config
def test_custom_config(self):
rootf = os.path.realpath(os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '../../adsws'))
self.assertEqual(self.app.config.get('BAR'), 'baz')
self.assertEqual(self.app.root_path, rootf, "root_path is not correct")
self.assertEqual(self.app.instance_path, self.config['instance_path'])
TEST_SUITE = make_test_suite(FactoryTest, FactoryTestCustomInstanceDir)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
neutronpy/neutronpy
|
neutronpy/data/analysis.py
|
Python
|
mit
| 8,726
| 0.00149
|
# -*- coding: utf-8 -*-
import numbers
import numpy as np
from ..constants import BOLTZMANN_IN_MEV_K
from ..energy import Energy
class Analysis(object):
r"""Class containing methods for the Data class
Attributes
----------
detailed_balance_factor
Methods
-------
integrate
position
width
scattering_function
dynamic_susceptibility
estimate_background
get_keys
get_bounds
"""
@property
def detailed_balance_factor(self):
r"""Returns the detailed balance factor (sometimes called the Bose
factor)
Parameters
----------
None
Returns
-------
dbf : ndarray
The detailed balance factor (temperature correction)
"""
return 1. - np.exp(-self.Q[:, 3] / BOLTZMANN_IN_MEV_K / self.temp)
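    # Illustrative note (not part of the original class): with the energy
    # transfer E (here self.Q[:, 3]) in meV and the temperature in K, the
    # factor is 1 - exp(-E / (kB * T)); e.g. E = 3 meV at T = 300 K gives
    # roughly 1 - exp(-3 / 25.85) ~ 0.11.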
def integrate(self, bounds=None, background=None, hkle=True):
r"""Returns the integrated intensity within given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : float
The integrated intensity either over all data, or within
specified boundaries
"""
result = 0
for key in self.get_keys(hkle):
result += np.trapz(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background),
np.squeeze(self.data[key][self.get_bounds(bounds)]))
return result
def position(self, bounds=None, background=None, hkle=True):
r"""Returns the position of a peak within the given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : tup
The result is a tuple with position in each dimension of Q,
(h, k, l, e)
"""
result = ()
for key in self.get_keys(hkle):
_result = 0
for key_integrate in self.get_keys(hkle):
_result += np.trapz(self.data[key][self.get_bounds(bounds)] *
(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
result += (np.squeeze(_result),)
if hkle:
return result
else:
return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
def width(self, bounds=None, background=None, fwhm=False, hkle=True):
r"""Returns the mean-squared width of a peak within the given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
fwhm : bool, optional
If True, returns width in fwhm, otherwise in mean-squared width.
Default: False
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : tup
The result is a tuple with the width in each dimension of Q,
(h, k, l, e)
"""
result = ()
for key in self.get_keys(hkle):
_result = 0
for key_integrate in self.get_keys(hkle):
_result += np.trapz((self.data[key][self.get_bounds(bounds)] -
self.position(bounds, background, hkle=False)[key]) ** 2 *
(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
if fwhm:
result += (np.sqrt(np.squeeze(_result)) * 2. * np.sqrt(2. * np.log(2.)),)
else:
result += (np.squeeze(_result),)
if hkle:
return result
else:
return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
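    # Illustrative note (not part of the original class): the factor
    # 2 * sqrt(2 * ln(2)) ~= 2.3548 used above converts a Gaussian standard
    # deviation (the square root of the mean-squared width) into a FWHM.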
def scattering_function(self, material, ei):
r"""Returns the neutron scattering function, i.e. the detector counts
scaled by :math:`4 \pi / \sigma_{\mathrm{tot}} * k_i/k_f`.
Parameters
----------
material : object
Definition of the material given by the :py:class:`.Material`
class
ei : float
Incident energy in meV
Returns
-------
counts : ndarray
The detector counts scaled by the total scattering cross section
and ki/kf
"""
ki = Energy(energy=ei).wavevector
kf = Energy(energy=ei - self.e).wavevector
return 4 * np.pi / material.total_scattering_cross_section * ki / kf * self.detector
def dynamic_susceptibility(self, material, ei):
r"""Returns the dynamic susceptibility
:math:`\chi^{\prime\prime}(\mathbf{Q},\hbar\omega)`
Parameters
----------
material : object
Definition of the material given by the :py:class:`.Material`
class
ei : float
Incident energy in meV
Returns
-------
counts : ndarray
The detector counts turned into the scattering function multiplied
by the detailed balance factor
"""
return self.scattering_function(material, ei) * self.detailed_balance_factor
def estimate_background(self, bg_params):
r"""Estimate the background according to ``type`` specified.
Parameters
----------
bg_params : dict
Input dictionary has keys 'type' and 'value'. Types are
* 'constant' : background is the constant given by 'value'
* 'percent' : background is estimated by the bottom x%, where x
is value
* 'minimum' : background is estimated as the detector counts
Returns
-------
background : float or ndarray
Value determined to be the background. Will return ndarray only if
`'type'` is `'constant'` and `'value'` is an ndarray
"""
if isinstance(bg_params, type(None)):
return 0
elif isinstance(bg_params, numbers.Number):
return bg_params
elif bg_params['type'] == 'constant':
return bg_params['value']
elif bg_params['type'] == 'percent':
inten = self.intensity[self.intensity >= 0.]
Npts = int(inten.size * (bg_params['value'] / 100.))
min_vals = inten[np.argsort(inten)[:Npts]]
background = np.average(min_vals)
return background
elif bg_params['type'] == 'minimum':
return min(self.intensity)
else:
return 0
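    # Illustrative note (not part of the original class): for the 'percent'
    # estimate above, intensities [1, 2, ..., 100] with value=10 keep the 10
    # lowest points (1..10), so the estimated background is their mean, 5.5.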
def get_bounds(self, bounds):
r"""Generates a to_fit tuple if bounds is present in kwargs
Parameters
----------
bounds : dict
Returns
-------
to_fit : tuple
Tuple of indices
"""
if bounds is not None:
return np.where(bounds)
else:
return np.wh
|
PyIran/website
|
project/database.py
|
Python
|
gpl-3.0
| 2,785
| 0.023339
|
# coding: utf-8
import datetime
from sqlalchemy.engine import create_engine
from sqlalchemy.ext.declarative.api import declarative_base
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import sessionmaker
import imp
from migrate.versioning import api
engine = create_engine('sqlite:///pyiran.db', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///pyiran.db'
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
Base.metadata.create_all(bind=engine)
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
newdb()
def migrate():
migration = SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % (api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) + 1)
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, Base.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as ' + migration
print 'Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
def newdb():
from apps.user.models import Profile, Roles, User_Role
session = db_session()
session._model_changes = {}
admin=Profile(
username="admin",
password="pbkdf2:sha1:1000$FWFdPAeH$6cf64b6fa1308fb1bc8baf799d13100f467040d7",
group_list="admin"
)
session.add(admin)
role=Roles(role_name="pyiran", parent=-1, description="developer", url="")
session.add(role)
role=Roles(role_name="admin", parent=0, description="sysadmin", url="")
session.add(role)
role=Roles(role_name="login", parent=0, description="enabeled user", url="")
session.add(role)
userrole=User_Role(user=1,role=1)
session.add(userrole)
userrole=User_Role(user=1,role=2)
session.add(userrole)
userrole=User_Role(user=1,role=3)
session.add(userrole)
session.commit()
|
wubr2000/googleads-python-lib
|
examples/dfa/v1_20/add_advertiser_user_filter.py
|
Python
|
apache-2.0
| 2,962
| 0.005402
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds a filter to a user profile, limiting its access to certain advertisers.
To get user IDs, run get_users.py. To get advertiser IDs, run
get_advertisers.py.
A similar pattern can be applied to set filters limiting site, user role,
and/or campaign access for any user. To get the Filter Criteria Type ID, run
get_user_filter_types.py.
The LoadFromStorage method is pulling credentials and properties from a
"g
|
oogleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
USER_ID = 'INSERT_USER_ID_HERE'
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
def main(client, user_id, advertiser_id):
# Initialize appropriate service.
user_service = client.GetService(
'user', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Retrieve the user who is to be modified.
user = user_service.getUser(user_id)
# Create and configure a user filter.
advertiser_filter = {
# The following field has been filled in to make a filter that allows a
# user to access only the assigned objects.
# This value was determined using get_user_filter_types.py.
'userFilterCriteriaId': '2',
# Because this filter used the criteria type "Assigned" it is necessary
# to specify what advertisers this user has access to. This next step
# would be skipped for the criteria types "All" and "None".
# Create a list of object filters to represent each object the user has
# access to. Since this is an advertiser filter, the list elements
# represent an advertiser each. The size of the list will need to match
# the total number of advertisers the user is assigned.
'objectFilters': [{
'id': advertiser_id
}]
}
# Add the filter to the user.
user['advertiserUserFilter'] = advertiser_filter
# Save the changes made and display a success message.
result = user_service.saveUser(user)
if result:
print 'User with ID \'%s\' was modified.' % result['id']
else:
print 'No user was modified.'
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client, USER_ID, ADVERTISER_ID)
|
translate/translate
|
translate/storage/test_zip.py
|
Python
|
gpl-2.0
| 2,697
| 0.000371
|
"""Tests for the zip storage module"""
import os
from zipfile import ZipFile
from translate.storage import zip
class TestZIPFile:
"""A test class to test the zip class that provides the directory interface."""
def setup_method(self, method):
"""sets up a test directory"""
print("setup_method called on", self.__class__.__name__)
self.testzip = "%s_testzip.zip" % (self.__class__.__name__)
self.cleardir(self.testzip)
self.zip = ZipFile(self.testzip, mode="w")
def teardown_method(self, method):
"""removes the attributes set up by setup_method"""
self.zip.close()
self.cleardir(self.testzip)
def cleardir(self, dirname):
"""removes the given directory"""
if os.path.exists(self.testzip):
os.remove(self.testzip)
assert not os.path.exists(self.testzip)
def touchfiles(self, dir, filenames, content="", last=False):
for filename in filenames:
if dir:
self.zip.writestr(os.path.join(dir, filename), content)
else:
self.zip.writestr(filename, content)
if last:
self.zip.close()
def mkdir(self, dir):
"""Makes
|
a directory inside self.testzip."""
pass
    def test_created(self):
"""test that the directory actually exists"""
print(self.testzip)
assert os.path.isfile(self.testzip)
def test_basic(self):
"""Tests basic functionality."""
files = ["a.po", "b.po", "c.po"]
self.touchfiles(None, files, last=True)
d = zip.ZIPFile(self.testzip)
try:
filenames = [name for dir, name in d.getfiles()]
assert filenames == files
finally:
d.close()
def test_structure(self):
"""Tests a small directory structure."""
files = ["a.po", "b.po", "c.po"]
self.touchfiles(self.testzip, files)
self.mkdir("bla")
self.touchfiles(os.path.join(self.testzip, "bla"), files, last=True)
d = zip.ZIPFile(self.testzip)
try:
filenames = [name for dir, name in d.getfiles()]
assert filenames == files * 2
finally:
d.close()
def test_getunits(self):
"""Tests basic functionality."""
files = ["a.po", "b.po", "c.po"]
posource = """msgid "bla"\nmsgstr "blabla"\n"""
self.touchfiles(self.testzip, files, posource, last=True)
d = zip.ZIPFile(self.testzip)
try:
for unit in d.getunits():
assert unit.target == "blabla"
assert len(d.getunits()) == 3
finally:
d.close()
|
axaxs/Cnchi
|
src/config.py
|
Python
|
gpl-3.0
| 1,966
| 0.03001
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright 2013 Cinnarch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
installer_settings = { \
    'CNCHI_DIR' : '/usr/share/cnchi/', \
'UI_DIR' : '/usr/share/cnchi/ui/', \
'DATA_DIR' : '/usr/share/cnchi/data/', \
'TMP_DIR' : '/tmp', \
'language_name' : '', \
'language_code' : '', \
'locale' : '', \
'keyboard_layout' : '', \
'keyboard_variant' : '', \
'timezone_human_zone' : '', \
'timezone_country' : '', \
                    'timezone_zone' : '', \
'timezone_human_country' : '', \
'timezone_comment' : '', \
'timezone_latitude' : 0, \
'timezone_longitude' : 0, \
'activate_ntp' : 1, \
'partition_mode' : 'm', \
'auto_device' : '/dev/sda', \
'log_file' : '/tmp/cnchi.log', \
'fullname' : '', \
'hostname' : 'cinnarch', \
'username' : '', \
'password' : '', \
'require_password' : True, \
'encrypt_home' : False }
|
nburn42/tensorflow
|
tensorflow/python/framework/random_seed.py
|
Python
|
apache-2.0
| 5,903
| 0.003219
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
DEFAULT_GRAPH_SEED = 87654321
_MAXINT32 = 2**31 - 1
def _truncate_seed(seed):
return seed % _MAXINT32 # Truncate to fit into 32-bit integer
@tf_export('get_seed')
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
Given operation-specific seed, `op_seed`, this helper function returns two
seeds derived from graph-level and op-level seeds. Many random operations
internally use the two seeds to allow user to change the seed globally for a
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
@{tf.set_random_seed}.
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
eager = context.executing_eagerly()
if eager:
global_seed = context.global_seed()
else:
global_seed = ops.get_default_graph().seed
if global_seed is not None:
if op_seed is None:
# pylint: disable=protected-access
if eager:
op_seed = context.internal_operation_seed()
else:
op_seed = ops.get_default_graph()._last_id
seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
else:
if op_seed is not None:
seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
else:
seeds = None, None
# Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
# be unexpected since Python docs say nondeterminism is (None, None).
if seeds == (0, 0):
return (0, _MAXINT32)
return seeds
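# Illustrative sketch (not part of the original module): how get_seed() above
# resolves the two seeds under the graph-mode rules it documents.
#
#   with ops.Graph().as_default():          # graph-level seed left unset
#       get_seed(None)   # -> (None, None): fully nondeterministic
#       get_seed(42)     # -> (DEFAULT_GRAPH_SEED, 42): default graph seed plus op seed
#   g = ops.Graph()
#   g.seed = 1234
#   with g.as_default():                    # graph-level seed set
#       get_seed(None)   # -> (1234, <internal op id>): op seed picked deterministically
#       get_seed(42)     # -> (1234, 42): both seeds used together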
@tf_export('set_random_seed')
def set_random_seed(seed):
"""Sets the graph-level random seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interactions with operation-level seeds is as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random_uniform([1])
b = tf.random_normal([1])
print("Session 1")
with tf.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.Session() as sess2:
print(sess2.run(a)) # generates 'A3'
print(sess2.run(a)) # generates 'A4'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random_uniform([1], seed=1)
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print("Session 1")
with tf.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B3'
print(sess2.run(b)) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
  tf.set_random_seed(1234)
a = tf.random_uniform([1])
b = tf.random_normal([1])
  # Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print("Session 1")
with tf.Session() as sess1:
print(sess1.run(a)) # generates 'A1'
print(sess1.run(a)) # generates 'A2'
print(sess1.run(b)) # generates 'B1'
print(sess1.run(b)) # generates 'B2'
print("Session 2")
with tf.Session() as sess2:
print(sess2.run(a)) # generates 'A1'
print(sess2.run(a)) # generates 'A2'
print(sess2.run(b)) # generates 'B1'
print(sess2.run(b)) # generates 'B2'
```
Args:
seed: integer.
"""
if context.executing_eagerly():
context.set_global_seed(seed)
else:
ops.get_default_graph().seed = seed
|
ericshawlinux/bitcoin
|
test/functional/p2p_sendheaders.py
|
Python
|
mit
| 26,656
| 0.002251
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
  - node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. First request's hashstop
value refers to validated block, while second request's hashstop value refers to
a block which hasn't been validated. Verifies only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.mininode import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
sync_blocks,
wait_until,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
|
gordon-zhao/Chrome_bookmarks_to_json
|
src/python/html2json.py
|
Python
|
mit
| 4,599
| 0.006741
|
# coding: utf-8
import json
import sys
import codecs
python3 = False
if sys.version_info[0] == 3: #Python 3
python3 = True
if not python3:
reload(sys)
sys.setdefaultencoding("utf-8")
input = raw_input
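# Illustrative note (not part of the original script): parseHTML() below walks
# the Netscape bookmark HTML that Chrome exports, which looks roughly like:
#
#   <H1>Bookmarks</H1>
#   <DL><p>
#       <DT><H3 ADD_DATE="...">Folder name</H3>
#       <DL><p>
#           <DT><A HREF="https://example.org" ADD_DATE="...">Bookmark title</A>
#       </DL><p>
#   </DL><p>
#
# Each folder becomes a nested dict, and each <A> tag is stored as a
# [url, title] pair in the enclosing folder's "link" list.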
def parseHTML(file_path):
fo = codecs.open(file_path, encoding='utf-8', mode='r+')
original = fo.read()
fo.close()
parsed = {}
current_location = []
current_location_name = []
current_index = 0
header = original.find("<H1>")
if header > -1:
ender = original.find("</H1>")
parsed[original[header+4:ender]] = {"link":[]}
folder_begin = original.find("<DL><p>")
if folder_begin > -1:
current_location.append(parsed[original[header+4:ender]])
current_location_name.append(original[header+4:ender])
original = original[folder_begin+7:]
block = False
original_length = len(original)
while current_index+1<original_length:
folder_title_header = original.find("<DT><H3", current_index)
folder_title_ender = original.find("</H3>", current_index)
folder_header = original.find("<DL><p>", current_index)
folder_ender = original.find("</DL><p>", current_index)
bookmark_header = original.find("<DT><A", current_index)
bookmark_ender = original.find("</A>", current_index)
        lists = [folder_title_header,folder_title_ender,folder_header,folder_ender,bookmark_header,bookmark_ender]
for i in range(6):
# Prevent the min() choose the not exists value -1
if lists[i] == -1:
lists[i] = original_length + 1
nearest_element = min(lists)
        if lists[3] + 8 >= original_length:
            # The last folder end mark reaches the end of the raw file, so leave the
            # loop; this also prevents the -1 returned by find() from making the loop
            # run over the input again.
break
if nearest_element == folder_title_header and not block:
if not folder_title_ender > -1 and not folder_title_header+1 > original_length:
block = True
continue
folder_title_header = original.find(">",folder_title_header+7)
upper_folder = current_location[-1]
upper_folder[original[folder_title_header+1:folder_title_ender]] = {"link": []}
current_location.append(upper_folder[original[folder_title_header+1:folder_title_ender]])
current_location_name.append(original[folder_title_header+1:folder_title_ender])
current_index = folder_title_ender + 5
print("Working on: {}".format("/".join(current_location_name)))
continue
if nearest_element == folder_header:
current_index = folder_header + 7
continue
if nearest_element == folder_ender and folder_ender + 8 < original_length:
current_location.pop()
current_location_name.pop()
current_index = folder_ender + 8
continue
if nearest_element == bookmark_header:
link_header = original.find("HREF=", bookmark_header)
if link_header > -1:
link_ender = original.find('"', link_header + 6)
bookmark_title_header = original.find(">", link_header)
current_location[-1]["link"].append([original[link_header+6:link_ender], original[bookmark_title_header+1:bookmark_ender]])
current_index = bookmark_ender + 4
continue
print("Finished parsing bookmarks!")
return parsed.copy()
def writeJSON(result, path_to_save=None, indent=4,encoding = "utf-8", mode="w+"):
if not path_to_save:
print("JSON saving path not found! Skipping...")
return 1
files = codecs.open(path_to_save,encoding=encoding,mode=mode)
if not python3:
files.write(json.dumps(result,indent=indent).decode('unicode-escape'))
elif python3:
msg = json.dumps(result, indent=indent)
msg = bytes(msg, 'utf-8')
files.write(msg.decode('unicode-escape'))
files.flush()
files.close()
print("JSON file written to path: {}".format(path_to_save))
if __name__=="__main__":
if len(sys.argv)==3:
args = [sys.argv[1],sys.argv[2]]
else:
args = []
args.append(input("Path to Exported Bookmark: "))
args.append(input("Export JSON to: "))
try:
result = parseHTML(args[0])
writeJSON(result, args[1])
except:
        sys.stderr.write("{}\n".format(sys.exc_info()))
finally:
input("Press [Enter] to continue...")
|
EmuKit/emukit
|
tests/emukit/bayesian_optimization/test_multipoint_expected_improvement.py
|
Python
|
apache-2.0
| 2,394
| 0.002924
|
import GPy
import numpy as np
from scipy.optimize import check_grad
from emukit.bayesian_optimization.acquisitions import MultipointExpectedImprovement
from emukit.model_wrappers import GPyModelWrapper
# Tolerance needs to be quite high since the q-EI is also an approximation.
TOL = 5e-3
# Tolerance for the gradient can be smaller since the approximation is not stochastic
TOL_GRAD = 1e-6
# Tolerance for the gradient of the fast method needs to be higher as it is an approximation of an approximation
TOL_GRAD_FAST = 1e-4
def test_acquisition_multipoint_expected_improvement():
"""
Check if the q-EI acquisition function produces similar results as sampling
"""
# Batch size
k = 2
# Set mean to one
mu = np.ones((k))
    # Sample a random k x k covariance matrix:
L = np.tril(np.random.sample((k, k)))
Sigma = L @ L.T
# Set current minimum to a random number smaller than the mean:
current_minimum = np.random.uniform()
# Compute acquisition:
qei_analytic, _, _ = MultipointExpectedImprovement(None)._get_acquisition(mu, Sigma, current_minimum)
acq_fast = MultipointExpectedImprovement(None, fast_compute=True, eps=1e-3)
qei_analytic_fast, _, _ = acq_fast._get_acquisition(mu, Sigma, current_minimum)
# Reference with sampling
N = 1000000
samples = np.random.multivariate_normal(mu, Sigma, size=N)
qei_sampled = current_minimum - np.min(samples, axis=1)
qei_sampled = sum(qei_sampled[qei_sampled > 0]) / float(N)
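    # The Monte Carlo reference above estimates the multipoint improvement
    # qEI = E[ max(0, current_minimum - min_i Y_i) ] with (Y_1, ..., Y_k) ~ N(mu, Sigma),
    # which both the analytic value and the fast approximation should match within TOL.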
assert np.abs(qei_sampled - qei_analytic) < TOL
assert np.abs(qei_analytic_fast - qei_analytic) < TOL
def test_acquisition_gradient_multipoint_expected_improvement():
"""
Check the q-EI acquisition function gradients with numeric differentiation
"""
    x_init = np.random.rand(3, 1)
y_init = np.random.rand(3, 1)
# Make GPy model
gpy_model = GPy.models.GPRegression(x_init, y_init)
model = GPyModelWrapper(gpy_model)
x0 = np.array([0.45, 0.55])
_check_grad(MultipointExpectedImprovement(model), TOL_GRAD, x0)
_check_grad(MultipointExpectedImprovement(model, fast_compute=True, eps=1e-3), TOL_GRAD_FAST, x0)
def _check_grad(lp, tol, x0):
grad_error = check_grad(
lambda x: lp.evaluate(x[:, None]).flatten(), lambda x: lp.evaluate_with_gradients(x[:, None])[1].flatten(), x0
)
assert np.all(grad_error < tol)
|
LLNL/spack
|
var/spack/repos/builtin/packages/perl-inline/package.py
|
Python
|
lgpl-2.1
| 603
| 0.004975
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlInline(PerlPackage):
"""Write Perl Subroutines in Other Programming Languages"""
homepage = "https://metacpan.org/pod/Inline"
    url      = "http://search.cpan.org/CPAN/authors/id/I/IN/INGY/Inline-0.80.tar.gz"
version('0.80', sha256='7e2bd984b1ebd43e336b937896463f2c6cb682c956cbd2c311a464363d2ccef6')
depends_on('perl-test-warn', type=('build', 'run'))
|
CaptainDesAstres/Frames-Animated-By-Curve
|
single_track/Combination.py
|
Python
|
gpl-3.0
| 4,675
| 0.056769
|
import bpy
from functions import *
class Combination():
'''A class containing all properties and methods
relative to combination settings for
Curve To Frame addon'''
def update_curves( self, context ):
		'''method that must be overridden: update curves when settings have been changed'''
type(self).update_curves( self, context )
######################################
## combination settings ##
######################################
# method used to combine amplitude and peaks curve
combination_mode = bpy.props.EnumProperty(
name = 'Mode',
description = 'the way to combine amplitude and peaks curve',
default = 'ignore_peaks',
items = [
# (identifier, name,
# description, number)
('multiply', 'Peaks Curve Multiplied by amplitude',
'peaks is multiplied by \
amplitude percentage of maxi', 0),
('clamp_key', 'Peaks Keyframe Clamped to amplitude',
'peaks keyframe is clamped by amplitude', 1),
('clamp_curve', 'Peaks Curve Clamped to amplitude',
'all peaks value is clamped by amplitude', 2),
('ignore_amplitude', 'Only use peaks curve',
'Only use peaks curve', 3),
('ignore_peaks', 'Only use amplitude curve',
'Only use amplitude curve', 4)
],
update = update_curves
)
# combination of net amplitude and peaks curves
combination = bpy.props.FloatProperty(
name = "combination",
description = "Only to visualize the combination of \
					peaks and amplitude curves. Can't \
					be edited manually: use rate and amplitude settings.",
default = 0,
min = 0,
max = 1)
def update_combination_curve(
self,
clip,
context,
amplitude_net_curve,
peaks_curve):
'''update clip combination curve'''
# get combination mode curve
combination_enum = clip.curve_to_frame.bl_rna.\
properties['combination_mode'].enum_items
combination_mode = combination_enum.find( clip.curve_to_frame.combination_mode )
combination_mode_curve = get_fcurve_by_data_path(clip,
'curve_to_frame.combination_mode')
# get and initialize combination curve
combination_curve = get_fcurve_by_data_path(clip,
'curve_to_frame.combination')
if combination_curve is not None:
hide = combination_curve.hide
clip.animation_data.action.fcurves.remove(combination_curve)
else:
hide = True
clip.animation_data.action.fcurves.new(
'curve_to_frame.combination')
combination_curve = get_fcurve_by_data_path(clip,
'curve_to_frame.combination')
# get rate curve
rate_curve = get_fcurve_by_data_path(clip, 'curve_to_frame.rate')
# loop only on peak curve keyframe
for keyframe in peaks_curve.keyframe_points:
# get peaks keyframe value and frame
frame = keyframe.co[0]
value = max( min(1, keyframe.co[1]), 0 )
# get combination_mode at this frame
if combination_mode_curve is not None:
combination_mode = combination_mode_curve.evaluate(frame)
# generate keyframe
if combination_mode != 3 : # «combination mode == multiply or clamp
value = value * amplitude_net_curve.evaluate(frame)
if combination_mode != 4 :
combination_curve.keyframe_points.insert(frame, value)
combination_curve.keyframe_points[-1].interpolation = 'LINEAR'
# loop for all frame
end = max( peaks_curve.keyframe_points[-1].co[0],
context.scene.frame_end )
frame = start = context.scene.frame_start
while frame <= end:
# get combination_mode at this frame
if combination_mode_curve is not None:
combination_mode = combination_mode_curve.evaluate(frame)
if combination_mode == 0 : # combination mode is «multiply»
value = max( min( 1, peaks_curve.evaluate(frame) ), 0 )\
* amplitude_net_curve.evaluate(frame)
				combination_curve.keyframe_points.insert(frame, value)
elif combination_mode == 2: # combination mode is «clamp_curve»
combination_curve.keyframe_points.insert(
frame,
max(
min (
amplitude_net_curve.evaluate(frame),
peaks_curve.evaluate(frame),
1
),
0
)
)
elif combination_mode == 4:
# combination mode is «ignore peaks»
				combination_curve.keyframe_points.insert(
frame,
amplitude_net_curve.evaluate(frame)
)
combination_curve.keyframe_points[-1].interpolation = 'LINEAR'
# next frame
frame += 1
#erase keyframe on flat section
avoid_useless_keyframe( combination_curve )
# prevent curve edition
combination_curve.lock = True
combination_curve.hide = hide
return combination_curve
|
praekeltfoundation/ndoh-hub
|
scripts/migrate_to_whatsapp_templates/tests/test_prebirth5.py
|
Python
|
bsd-3-clause
| 1,231
| 0.001625
|
import unittest
from scripts.migrate_to_whatsapp_templates.prebirth5 import Prebirth5Migration
class Testprebirth5(unittest.TestCase):
def setUp(self):
self.prebirth5 = Prebirth5Migration()
def test_sequence_number_to_weeks(self):
"""
Given a certain sequence number for the prebirth 1 messageset, it should return
the correct number of weeks pregnant
"""
self.assertEqual(self.prebirth5.sequence_number_to_weeks(1), 38)
self.assertEqual(self.prebirth5.sequence_number_to_weeks(2), 38)
self.assertEqual(self.prebirth5.sequence_number_to_weeks(3), 38)
        self.assertEqual(self.prebirth5.sequence_number_to_weeks(5), 38)
self.assertEqual(self.prebirth5.sequence_number_to_weeks(14), 40)
self.assertEqual(self.prebirth5.sequence_number_to_weeks(15), 40)
def test_get_template_variables(self):
message = {
"id": "1",
"messageset": "2",
"sequence_number": "3",
"lang": "zul_ZA",
"text_content": "test",
"binary_content": "",
"metadata": "{}",
}
self.assertEqual(self.prebirth5.get_template_variables(message), ["38", "test"])
|
vellonce/PizzaFria
|
pizzafria/localsettings.py
|
Python
|
gpl-2.0
| 585
| 0.001709
|
# -*- coding: utf-8 -*-
__author__ = 'iwdev1'
from .settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'pizza_db',
'USER': 'root',
'PASSWORD': 'A8d32e08.',
'HOST': '',
'PORT': '',
}
}
ALLOWED_HOSTS = []
STATIC_ROOT = ''
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_PATH, 'templates/static/'),
)
|
dokterbob/satchmo
|
satchmo/apps/product/modules/downloadable/migrations/0001_split.py
|
Python
|
bsd-3-clause
| 18,953
| 0.007862
|
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (
('product', '0010_add_discountable_categories'),
)
def forwards(self, orm):
db.rename_table('product_downloadableproduct', 'downloadable_downloadableproduct')
# check if the table exists; might be a fresh, post 0.9 installation
try:
from django.db import connection
cursor = connection.cursor()
if not cursor:
raise Exception
table_names = connection.introspection.get_table_list(cursor)
except:
raise Exception("unable to determine if the table 'shop_downloadlink' exists")
else:
if not 'shop_downloadlink' in table_names:
# create the table
# create commands were obtained from a fresh --initial migration
db.create_table('downloadable_downloadlink', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('downloadable_product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['downloadable.DownloadableProduct'])),
('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shop.Order'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('num_attempts', self.gf('django.db.models.fields.IntegerField')()),
('time_stamp', self.gf('django.db.models.fields.DateTimeField')()),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('downloadable', ['DownloadLink'])
else:
db.rename_table('shop_downloadlink', 'downloadable_downloadlink')
def backwards(self, orm):
db.rename_table('downloadable_downloadableproduct', 'product_downloadableproduct')
db.rename_table('downloadable_downloadlink', 'shop_downloadlink')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contact.contact': {
'Meta': {'object_name': 'Contact'},
'create_date': ('django.db.models.fields.DateField', [], {}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '500', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Organization']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.ContactRole']", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'contact.contactorganization': {
'Meta': {'object_name': 'ContactOrganization'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contact.contactorganizationrole': {
'Meta': {'object_name': 'ContactOrganizationRole'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contact.contactrole': {
'Meta': {'object_name': 'ContactRole'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contact.organization': {
'Meta': {'object_name': 'Organization'},
'create_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.ContactOrganizationRole']", 'null': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.ContactOrganization']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'downloadable.downloadableproduct': {
'Meta': {'object_name': 'DownloadableProduct'},
'active': ('django.db.models.fiel
|
jobiols/odoo-argentina
|
l10n_ar_partner/__openerp__.py
|
Python
|
agpl-3.0
| 1,050
| 0
|
# -*- coding: utf-8 -*-
{
'author': "Moldeo Interactive,ADHOC SA,Odoo Community Association (OCA)",
'category': 'Localization/Argentina',
'depends': [
'partner_identification',
# this is for demo data, for fiscal position data on account
        # and also because it is essential for Argentinian use
'l10n_ar_states',
# for the sales config
'base_setup',
],
'installable': True,
'license': 'AGPL-3',
    'name': 'Títulos de Personería y Tipos de documentos Argentinos',
'data': [
'data/res_partner_title_data.xml',
'data/res_partner_id_category_data.xml',
        'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_partner_id_category_view.xml',
'views/res_partner_id_number_view.xml',
'sale_config_view.xml',
'security/security.xml',
],
'demo': [
        'demo/partner_demo.xml',
],
'version': '9.0.1.3.0',
'post_init_hook': 'post_init_hook',
'pre_init_hook': 'pre_init_hook',
}
|
beppec56/core
|
scripting/source/pyprov/mailmerge.py
|
Python
|
gpl-3.0
| 17,916
| 0.030196
|
# Caolan McNamara caolanm@redhat.com
# a simple email mailmerge component
# manual installation for hackers, not necessary for users
# cp mailmerge.py /usr/lib/libreoffice/program
# cd /usr/lib/libreoffice/program
# ./unopkg add --shared mailmerge.py
# edit ~/.openoffice.org2/user/registry/data/org/openoffice/Office/Writer.xcu
# and change EMailSupported to as follows...
# <prop oor:name="EMailSupported" oor:type="xs:boolean">
# <value>true</value>
# </prop>
from __future__ import print_function
import unohelper
import uno
import re
import os
import encodings.idna
#to implement com::sun::star::mail::XMailServiceProvider
#and
#to implement com.sun.star.mail.XMailMessage
from com.sun.star.mail import XMailServiceProvider
from com.sun.star.mail import XMailService
from com.sun.star.mail import XSmtpService
from com.sun.star.mail import XConnectionListener
from com.sun.star.mail import XAuthenticator
from com.sun.star.mail import XMailMessage
from com.sun.star.mail.MailServiceType import SMTP
from com.sun.star.mail.MailServiceType import POP3
from com.sun.star.mail.MailServiceType import IMAP
from com.sun.star.uno import XCurrentContext
from com.sun.star.lang import IllegalArgumentException
from com.sun.star.lang import EventObject
from com.sun.star.lang import XServiceInfo
from com.sun.star.mail import SendMailMessageFailedException
from email.mime.base import MIMEBase
from email.message import Message
from email.charset import Charset
from email.charset import QP
from email.encoders import encode_base64
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.utils import formatdate
from email.utils import parseaddr
from socket import _GLOBAL_DEFAULT_TIMEOUT
import sys, smtplib, imaplib, poplib
dbg = False
# pythonloader looks for a static g_ImplementationHelper variable
g_ImplementationHelper = unohelper.ImplementationHelper()
g_providerImplName = "org.openoffice.pyuno.MailServiceProvider"
g_messageImplName = "org.openoffice.pyuno.MailMessage"
#no stderr under windows, output to pymailmerge.log
#with no buffering
if dbg and os.name == 'nt':
dbgout = open('pymailmerge.log', 'w', 0)
else:
dbgout = sys.stderr
class PyMailSMTPService(unohelper.Base, XSmtpService):
def __init__( self, ctx ):
self.ctx = ctx
self.listeners = []
self.supportedtypes = ('Insecure', 'Ssl')
self.server = None
self.connectioncontext = None
self.notify = EventObject(self)
if dbg:
print("PyMailSMTPService init", file=dbgout)
print("python version is: " + sys.version, file=dbgout)
def addConnectionListener(self, xListener):
if dbg:
print("PyMailSMTPService addConnectionListener", file=dbgout)
self.listeners.append(xListener)
def removeConnectionListener(self, xListener):
if dbg:
print("PyMailSMTPService removeConnectionListener", file=dbgout)
self.listeners.remove(xListener)
def getSupportedConnectionTypes(self):
if dbg:
print("PyMailSMTPService getSupportedConnectionTypes", file=dbgout)
return self.supportedtypes
def connect(self, xConnectionContext, xAuthenticator):
self.connectioncontext = xConnectionContext
if dbg:
print("PyMailSMTPService connect", file=dbgout)
server = xConnectionContext.getValueByName("ServerName")
if dbg:
print("ServerName: " + server, file=dbgout)
port = int(xConnectionContext.getValueByName("Port"))
if dbg:
print("Port: " + str(port), file=dbgout)
tout = xConnectionContext.getValueByName("Timeout")
if dbg:
print(isinstance(tout,int), file=dbgout)
if not isinstance(tout,int):
tout = _GLOBAL_DEFAULT_TIMEOUT
if dbg:
print("Timeout: " + str(tout), file=dbgout)
self.server = smtplib.SMTP(server, port,timeout=tout)
#stderr not available for us under windows, but
#set_debuglevel outputs there, and so throw
#an exception under windows on debugging mode
#with this enabled
if dbg and os.name != 'nt':
self.server.set_debuglevel(1)
connectiontype = xConnectionContext.getValueByName("ConnectionType")
if dbg:
print("ConnectionType: " + connectiontype, file=dbgout)
if connectiontype.upper() == 'SSL':
self.server.ehlo()
self.server.starttls()
self.server.ehlo()
user = xAuthenticator.getUserName()
password = xAuthenticator.getPassword()
if user != '':
if sys.version < '3': # fdo#59249 i#105669 Python 2 needs "ascii"
user = user.encode('ascii')
password = password.encode('ascii')
if dbg:
print("Logging in, username of: " + user, file=dbgout)
self.server.login(user, password)
for listener in self.listeners:
listener.connected(self.notify)
def disconnect(self):
if dbg:
print("PyMailSMTPService disconnect", file=dbgout)
if self.server:
self.server.quit()
self.server = None
for listener in self.listeners:
listener.disconnected(self.notify)
def isConnected(self):
if dbg:
print("PyMailSMTPService isConnected", file=dbgout)
return self.server != None
def getCurrentConnectionContext(self):
if dbg:
print("PyMailSMTPService getCurrentConnectionContext", file=dbgout)
return self.connectioncontext
def sendMailMessage(self, xMailMessage):
COMMASPACE = ', '
if dbg:
print("PyMailSMTPService sendMailMessage", file=dbgout)
recipients = xMailMessage.getRecipients()
sendermail = xMailMessage.SenderAddress
sendername = xMailMessage.SenderName
subject = xMailMessage.Subject
ccrecipients = xMailMessage.getCcRecipients()
bccrecipients = xMailMessage.getBccRecipients()
if dbg:
print("PyMailSMTPService subject: " + subject, file=dbgout)
print("PyMailSMTPService from: " + sendername, file=dbgout)
print("PyMailSMTPService from: " + sendermail, file=dbgout)
print("PyMailSMTPService send to: %s" % (recipients,), file=dbgout)
attachments = xMailMessage.getAttachments()
textmsg = Message()
content = xMailMessage.Body
		flavors = content.getTransferDataFlavors()
if dbg:
print("PyMailSMTPService flavors len: %d" % (len(flavors),), file=dbgout)
#Use first flavor that's sane for an email body
for flavor in flavors:
if flavor.MimeType.find('text/html') != -1 or flavor.MimeType.find('text/plain') != -1:
if dbg:
print("PyMailSMTPService mimetype is: " + flavor.MimeType, file=dbgout)
textbody = content.getTransferData(flavor)
if len(textbody):
mimeEncoding = re.sub("charset=.*", "charset=UTF-8", f
|
lavor.MimeType)
if mimeEncoding.find('charset=UTF-8') == -1:
mimeEncoding = mimeEncoding + "; charset=UTF-8"
textmsg['Content-Type'] = mimeEncoding
textmsg['MIME-Version'] = '1.0'
try:
#it's a string, get it as utf-8 bytes
textbody = textbody.encode('utf-8')
except:
#it's a bytesequence, get raw bytes
textbody = textbody.value
if sys.version >= '3':
if sys.version_info.minor < 3 or (sys.version_info.minor == 3 and sys.version_info.micro <= 1):
#http://stackoverflow.com/questions/9403265/how-do-i-use-python-3-2-email-module-to-send-unicode-messages-encoded-in-utf-8-w
#see http://bugs.python.org/16564, etc. basically it now *seems* to be all ok
#in python 3.3.2 onwards, but a little busted in 3.3.0
textbody = textbody.decode('iso8859-1')
else:
textbody = textbody.decode('utf-8')
c = Charset('utf-8')
c.body_encoding = QP
textmsg.set_payload(textbody, c)
else:
textmsg.set_payload(textbody)
break
if (len(attachments)):
msg = MIMEMultipart()
msg.epilogue = ''
msg.attach(textmsg)
else:
msg = textmsg
hdr = Header(sendername, 'utf-8')
hdr.append('<'+sendermail+'>','us-ascii')
msg['Subject'] = subject
msg['From'] = hdr
msg['To'] = COMMASPACE.join(recipients)
if len(ccrecipients):
msg['Cc'] = COMMASPACE.join(ccrecipients)
if xMailMessage.ReplyToAddress != '':
msg['Reply-To'] = xMailMessage.ReplyToAddress
mailerstring = "LibreOffice via Caolan's mailmerge component"
try:
ctx = uno.getComponentContext()
aConfigProvider = ctx.ServiceManager.createInstance("com.sun.star.configuration.ConfigurationProvider")
prop = uno.createUnoStruct('com.sun.star.beans.PropertyValue')
prop.Name = "nodepath"
prop.Val
|
ESS-LLP/erpnext
|
erpnext/hooks.py
|
Python
|
gpl-3.0
| 28,711
| 0.021351
|
from __future__ import unicode_literals
from frappe import _
app_name = "erpnext"
app_title = "ERPNext"
app_publisher = "Frappe Technologies Pvt. Ltd."
app_description = """ERP made simple"""
app_icon = "fa fa-th"
app_color = "#e74c3c"
app_email = "info@erpnext.com"
app_license = "GNU General Public License (v3)"
source_link = "https://github.com/frappe/erpnext"
app_logo_url = "/assets/erpnext/images/erpnext-logo.svg"
develop_version = '13.x.x-develop'
app_include_js = "/assets/js/erpnext.min.js"
app_include_css = "/assets/css/erpnext.css"
web_include_js = "/assets/js/erpnext-web.min.js"
web_include_css = "/assets/css/erpnext-web.css"
doctype_js = {
"Address": "public/js/address.js",
"Communication": "public/js/communication.js",
"Event": "public/js/event.js",
"Newsletter": "public/js/newsletter.js"
}
override_doctype_class = {
'Address': 'erpnext.accounts.custom.address.ERPNextAddress'
}
welcome_email = "erpnext.setup.utils.welcome_email"
# setup wizard
setup_wizard_requires = "assets/erpnext/js/setup_wizard.js"
setup_wizard_stages = "erpnext.setup.setup_wizard.setup_wizard.get_setup_stages"
setup_wizard_test = "erpnext.setup.setup_wizard.test_setup_wizard.run_setup_wizard_test"
before_install = "erpnext.setup.install.check_setup_wizard_not_completed"
after_install = "erpnext.setup.install.after_install"
boot_session = "erpnext.startup.boot.boot_session"
notification_config = "erpnext.startup.notifications.get_notification_config"
get_help_messages = "erpnext.utilities.activation.get_help_messages"
leaderboards = "erpnext.startup.leaderboard.get_leaderboards"
filters_config = "erpnext.startup.filters.get_filters_config"
additional_print_settings = "erpnext.controllers.print_settings.get_print_settings"
on_session_creation = [
"erpnext.portal.utils.create_customer_or_supplier",
"erpnext.shopping_cart.utils.set_cart_count"
]
on_logout = "erpnext.shopping_cart.utils.clear_cart_count"
treeviews = ['Account', 'Cost Center', 'Warehouse', 'Item Group', 'Customer Group', 'Sales Person', 'Territory', 'Assessment Group', 'Department']
# website
update_website_context = ["erpnext.shopping_cart.utils.update_website_context", "erpnext.education.doctype.education_settings.education_settings.update_website_context"]
my_account_context = "erpnext.shopping_cart.utils.update_my_account_context"
calendars = ["Task", "Work Order", "Leave Application", "Sales Order", "Holiday List", "Course Schedule"]
domains = {
'Agriculture': 'erpnext.domains.agriculture',
'Distribution': 'erpnext.domains.distribution',
'Education': 'erpnext.domains.education',
'Healthcare': 'erpnext.domains.healthcare',
'Hospitality': 'erpnext.domains.hospitality',
'Manufacturing': 'erpnext.domains.manufacturing',
'Non Profit': 'erpnext.domains.non_profit',
'Retail': 'erpnext.domains.retail',
'Services': 'erpnext.domains.services',
}
website_generators = ["Item Group", "Item", "BOM", "Sales Partner",
"Job Opening", "Student Admission"]
website_context = {
"favicon": "/assets/erpnext/images/erpnext-favicon.svg",
"splash_image": "/assets/erpnext/images/erpnext-logo.svg"
}
website_route_rules = [
{"from_route": "/orders", "to_route": "Sales Order"},
{"from_route": "/orders/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Sales Order",
"parents": [{"label": _("Orders"), "route": "orders"}]
}
},
{"from_route"
|
: "/invoices", "to_route": "Sale
|
s Invoice"},
{"from_route": "/invoices/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Sales Invoice",
"parents": [{"label": _("Invoices"), "route": "invoices"}]
}
},
{"from_route": "/supplier-quotations", "to_route": "Supplier Quotation"},
{"from_route": "/supplier-quotations/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Supplier Quotation",
"parents": [{"label": _("Supplier Quotation"), "route": "supplier-quotations"}]
}
},
{"from_route": "/purchase-orders", "to_route": "Purchase Order"},
{"from_route": "/purchase-orders/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Purchase Order",
"parents": [{"label": _("Purchase Order"), "route": "purchase-orders"}]
}
},
{"from_route": "/purchase-invoices", "to_route": "Purchase Invoice"},
{"from_route": "/purchase-invoices/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Purchase Invoice",
"parents": [{"label": _("Purchase Invoice"), "route": "purchase-invoices"}]
}
},
{"from_route": "/quotations", "to_route": "Quotation"},
{"from_route": "/quotations/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Quotation",
"parents": [{"label": _("Quotations"), "route": "quotations"}]
}
},
{"from_route": "/shipments", "to_route": "Delivery Note"},
{"from_route": "/shipments/<path:name>", "to_route": "order",
"defaults": {
"doctype": "Delivery Note",
"parents": [{"label": _("Shipments"), "route": "shipments"}]
}
},
{"from_route": "/rfq", "to_route": "Request for Quotation"},
{"from_route": "/rfq/<path:name>", "to_route": "rfq",
"defaults": {
"doctype": "Request for Quotation",
"parents": [{"label": _("Request for Quotation"), "route": "rfq"}]
}
},
{"from_route": "/addresses", "to_route": "Address"},
{"from_route": "/addresses/<path:name>", "to_route": "addresses",
"defaults": {
"doctype": "Address",
"parents": [{"label": _("Addresses"), "route": "addresses"}]
}
},
{"from_route": "/jobs", "to_route": "Job Opening"},
{"from_route": "/admissions", "to_route": "Student Admission"},
{"from_route": "/boms", "to_route": "BOM"},
{"from_route": "/timesheets", "to_route": "Timesheet"},
{"from_route": "/material-requests", "to_route": "Material Request"},
{"from_route": "/material-requests/<path:name>", "to_route": "material_request_info",
"defaults": {
"doctype": "Material Request",
"parents": [{"label": _("Material Request"), "route": "material-requests"}]
}
},
]
standard_portal_menu_items = [
{"title": _("Personal Details"), "route": "/personal-details", "reference_doctype": "Patient", "role": "Patient"},
{"title": _("Projects"), "route": "/project", "reference_doctype": "Project"},
{"title": _("Request for Quotations"), "route": "/rfq", "reference_doctype": "Request for Quotation", "role": "Supplier"},
{"title": _("Supplier Quotation"), "route": "/supplier-quotations", "reference_doctype": "Supplier Quotation", "role": "Supplier"},
{"title": _("Purchase Orders"), "route": "/purchase-orders", "reference_doctype": "Purchase Order", "role": "Supplier"},
{"title": _("Purchase Invoices"), "route": "/purchase-invoices", "reference_doctype": "Purchase Invoice", "role": "Supplier"},
{"title": _("Quotations"), "route": "/quotations", "reference_doctype": "Quotation", "role":"Customer"},
{"title": _("Orders"), "route": "/orders", "reference_doctype": "Sales Order", "role":"Customer"},
{"title": _("Invoices"), "route": "/invoices", "reference_doctype": "Sales Invoice", "role":"Customer"},
{"title": _("Shipments"), "route": "/shipments", "reference_doctype": "Delivery Note", "role":"Customer"},
{"title": _("Issues"), "route": "/issues", "reference_doctype": "Issue", "role":"Customer"},
{"title": _("Addresses"), "route": "/addresses", "reference_doctype": "Address"},
{"title": _("Timesheets"), "route": "/timesheets", "reference_doctype": "Timesheet", "role":"Customer"},
{"title": _("Lab Test"), "route": "/lab-test", "reference_doctype": "Lab Test", "role":"Patient"},
{"title": _("Prescription"), "route": "/prescription", "reference_doctype": "Patient Encounter", "role":"Patient"},
{"title": _("Patient Appointment"), "route": "/patient-appointments", "reference_doctype": "Patient Appointment", "role":"Patient"},
{"title": _("Fees"), "route": "/fees", "reference_doctype": "Fees", "role":"Student"},
{"title": _("Newsletter"), "route": "/newsletters", "reference_doctype": "Newsletter"},
{"title": _("Admission"), "route": "/admissions", "reference_doctype": "Student Admission", "role": "Student"},
{"title": _("Certification"), "route": "/certification", "reference_doctype": "Certification Application", "role": "Non Profit Portal User"},
{"title": _("Material Request"), "rou
|
9seconds/isitbullshit
|
setup.py
|
Python
|
mit
| 2,097
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from setuptools.command.test import test
REQUIREMENTS = (
"six",
)
with open("README.rst", "r") as resource:
LONG_DESCRIPTION = resource.read()
# copypasted from http://pytest.org/latest/goodpractises.html
class PyTest(test):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
test.initialize_options(self)
self.pytest_args = None # pylint: disable=W0201
def finalize_options(self):
test.finalize_options(self)
self.test_args = [] # pylint: disable=W0201
self.test_suite = True # pylint: disable=W0201
    def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
import sys
errno = pytest.main(self.pytest_args)
sys.exit(errno)
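# Note (added for clarity): with the cmdclass mapping below
# (cmdclass={'test': PyTest}) and tests_require, running `python setup.py test`
# fetches the test dependencies and executes the py.test suite via this class.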
setup(
name="isitbullshit",
description=("Small library for verifying parsed JSONs "
"if they are bullshit or not"),
long_description=LONG_DESCRIPTION,
version="0.2.1",
author="Sergey Arkhipov",
license="MIT",
author_email="serge@aerialsounds.org",
maintainer="Sergey Arkhipov",
maintainer_email="serge@aerialsounds.org",
url="https://github.com/9seconds/isitbullshit/",
install_requires=REQUIREMENTS,
keywords="json validation jsonschema",
tests_require=["pytest==2.6.1"],
packages=find_packages(),
cmdclass={'test': PyTest},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Testing",
],
zip_safe=False
)
|
Overdrivr/DistantIO
|
distantio/__init__.py
|
Python
|
mit
| 295
| 0.003413
|
# Copyright (C) 2014 Rémi Bèges
# For conditions of distribution and use, see copyright notice in the LICENSE file
from distantio.DistantIO import DistantIO
from distantio.DistantIOProtocol import distantio_protocol
from distantio.SerialPort import SerialPort
from distantio.crc import crc16
|
paulorauber/rl
|
examples/blackjack.py
|
Python
|
mit
| 5,813
| 0.001892
|
import numpy as np
from itertools import product
from learning.model_free import Problem
from learning.model_free import sarsa
from learning.model_free import qlearning
from learning.model_free import mc_value_iteration
from learning.model_free import sarsa_lambda
from learning.model_free import q_lambda
# from learning.model_building import dyna_q_learning
# from learning.model_building import dyna_q_learning_last_visit
# from learning.model_building import dyna_q_learning_stochastic
class BlackJack(Problem):
def __init__(self):
# Sum of player's cards, dealer's showing card, usable ace
self.states = [(-1, -1, -1)]
self.states += [(i, j, k)
for (i, j, k) in product(range(12, 22), range(1, 11), [0, 1])]
self.a = ['hit', 'stick']
self.states_map = {s: i for i, s in enumerate(self.states)}
Problem.__init__(self, len(self.states), len(self.a))
def get_card(self):
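        # Note (added for clarity): randint(1, 14) draws a card rank 1-13
        # (ace through king); min(10, ...) makes jack, queen and king count as 10.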
return min(10, np.random.randint(1, 14))
def sample_initial_state(self):
my_card = np.random.randint(12, 22)
dealer_showing = self.get_card()
usable_ace = np.random.randint(0, 2)
return self.states_map[(my_card, dealer_showing, usable_ace)]
def hand_value(self, sum_cards, usable_ace):
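        # Note (added for clarity): a usable ace is counted as 11 in sum_cards;
        # if that would bust the hand, count it as 1 instead (subtract 10).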
if usable_ace and sum_cards > 21:
return sum_cards - 10
return sum_cards
def actions(self, s):
(my_sum, _, usable_ace) = self.states[s]
if self.hand_value(my_sum, usable_ace) >= 21:
return [1]
else:
return [0, 1]
def is_final(self, s):
return s == 0
# Computes the next state and reward pair and whether the state is final
def state_reward(self, s, a):
(my_sum, dealer_card, usable_ace) = self.states[s]
next_s = self.states_map[(my_sum, dealer_card, usable_ace)]
if a == 1: # Stick
if self.hand_value(my_sum, usable_ace) > 21:
return 0, -1
dealer_sum = dealer_card
dealer_usable_ace = 0
if dealer_card == 1:
dealer_sum += 10
dealer_usable_ace = 1
while self.hand_value(dealer_sum, dealer_usable_ace) < self.hand_value(my_sum, usable_ace):
card = self.get_card()
dealer_sum += card
if card == 1:
dealer_sum += 10
if card == 1 or dealer_usable_ace:
if dealer_sum <= 21:
dealer_usable_ace = 1
else:
dealer_sum -= 10
dealer_usable_ace = 0
if self.hand_value(dealer_sum, dealer_usable_ace) == self.hand_value(my_sum, usable_ace) == 17:
return 0, 0
if self.hand_value(dealer_sum, dealer_usable_ace) > 21:
return 0, 1
if self.hand_value(dealer_sum, dealer_usable_ace) == self.hand_value(my_sum, usable_ace):
return 0, 0
if self.hand_value(dealer_sum, dealer_usable_ace) < self.hand_value(my_sum, usable_ace):
return 0, 1
# if dealer_sum > my_sum:
return 0, -1
else: # Hit
card = self.get_card()
my_sum += card
if card == 1:
my_sum += 10
if card == 1 or usable_ace:
if my_sum <= 21:
usable_ace = 1
else:
my_sum -= 10
usable_ace = 0
if self.hand_value(my_sum, usable_ace) > 21:
return 0, -1
# Only nonterminal case
next_s = self.states_map[(my_sum, dealer_card, usable_ace)]
return next_s, 0
raise Exception('Unexpected state/action pair')
def print_policy(self, policy):
print('Usable ace:')
for i, state in enumerate(self.states):
            if state[2]:
print('Hand value: {0}, Dealer Showing: {1}, Action: {2}'.format(
self.hand_value(state[0], 1), state[1], self.a[policy[i]]))
print('No usable ace:')
for i, state in enumerate(self.states):
if not state[2]:
print('Hand value: {0}, Dealer Showing: {1}, Action: {2}'.format(
self.hand_value(state[0], 0), state[1], self.a[policy[i]]))
def print_values(self, values):
for i in range(len(values)):
print('State {0}. Value: {1}'.format(self.states[i], values[i]))
def main():
problem = BlackJack()
pi, v = sarsa(problem, 10000, epsilon=0.1, alpha=0.1, gamma=1.0)
problem.print_policy(pi)
problem.print_values(v)
pi, v = qlearning(problem, 10000, epsilon=0.1, alpha=0.1, gamma=1.0)
problem.print_policy(pi)
problem.print_values(v)
pi, v = sarsa_lambda(problem, 10000, epsilon=0.1, alpha=0.1, gamma=1.0)
problem.print_policy(pi)
problem.print_values(v)
pi, v = q_lambda(problem, 10000, epsilon=0.1, alpha=0.1, gamma=1.0)
problem.print_policy(pi)
problem.print_values(v)
pi, v = mc_value_iteration(problem, 10000, 10000, 0.2)
problem.print_policy(pi)
problem.print_values(v)
# pi, v = dyna_q_learning(problem, 30, 50, epsilon = 0.1, alpha = 0.1, gamma = 0.9)
#
# problem.print_policy(pi)
# problem.print_values(v)
#
# pi, v = dyna_q_learning_last_visit(problem, 30, 50, epsilon = 0.1, alpha = 0.1, gamma = 0.9, kappa = 0.00)
# problem.print_policy(pi)
# problem.print_values(v)
#
# pi, v = dyna_q_learning_stochastic(problem, 30, 50, epsilon = 0.1, alpha = 0.1, gamma = 0.9)
#
# problem.print_policy(pi)
# problem.print_values(v)
if __name__ == "__main__":
main()
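# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the original file: a minimal
# random-policy rollout against the environment interface defined above
# (sample_initial_state, actions, state_reward, is_final). The helper name
# random_rollout and the max_steps cap are made up for this example; np is
# assumed to be numpy, as imported at the top of this file.
# ---------------------------------------------------------------------------
def random_rollout(problem, max_steps=100):
    """Play one episode with uniformly random actions and return the total reward."""
    s = problem.sample_initial_state()
    total_reward = 0
    for _ in range(max_steps):
        a = np.random.choice(problem.actions(s))  # uniform choice among legal actions
        s, r = problem.state_reward(s, a)         # environment transition and reward
        total_reward += r
        if problem.is_final(s):                   # state 0 is the terminal state
            break
    return total_reward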
|
ProjectFacet/facet
|
project/editorial/migrations/0050_auto_20171117_1716.py
|
Python
|
mit
| 437
| 0.002288
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editorial', '0049_auto_20171116_1526'),
]
operations = [
migrations.AlterField(
model_name='facet',
name='story',
field=models.ForeignKey(related_name='storyfacet', to='editorial.Story'),
),
]
|
spyder-ide/spyder-terminal
|
spyder_terminal/config.py
|
Python
|
mit
| 1,346
| 0.000743
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Spyder terminal default configuration."""
import os
import sys
WINDOWS = os.name == 'nt'
LINUX = sys.platform.startswith('linux')
CONF_SECTION = 'terminal'
CONF_DEFAULTS = [
(CONF_SECTION,
{
'sound': True,
'cursor_type': 0,
'shell': 'cmd' if WINDOWS else 'bash',
'buffer_limit': 1000,
            'cursor_blink': True,
'zoom': 0,
}
),
('shortcuts',
{
'terminal/copy': 'Ctrl+Alt+Shift+C' if LINUX else 'Ctrl+Alt+C',
'terminal/paste': 'Ctrl+Alt+Shift+V' if LINUX else 'Ctrl+Alt+V',
'terminal/new_terminal': 'Ctrl+Alt+T',
'terminal/clear': 'Ctrl+Alt+K',
'terminal/zoom_in': 'Ctrl++',
'terminal/zoom_out': 'Ctrl+-',
}
)
]
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you need to
# do a MINOR update in config version, e.g. from 1.0.0 to 1.1.0
# 2. If you want to *remove* options that are no longer needed in our codebase,
# or if you want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 1.0.0 to 2.0.0
# 3. You don't need to touch this value if you're just adding a new option
CONF_VERSION = '2.0.0'
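# Illustrative sketch only, not part of the original file: how the notes above
# would be applied in practice. The names CONF_DEFAULTS_NEXT and
# CONF_VERSION_NEXT are hypothetical and exist purely to show the
# version-bump convention.
#
# CONF_DEFAULTS_NEXT = [(CONF_SECTION, {'buffer_limit': 2000})]  # changed an existing default
# CONF_VERSION_NEXT = '2.1.0'  # MINOR bump: a default value changed
# # Removing or renaming an option (e.g. dropping 'sound') would instead
# # require a MAJOR bump, e.g. CONF_VERSION_NEXT = '3.0.0'.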
|
aaivazis/nautilus
|
nautilus/auth/util/token_encryption_algorithm.py
|
Python
|
mit
| 52
| 0.019231
|
def token_encryption_algorithm():
    return 'HS256'
|
fernandezcuesta/ansible
|
lib/ansible/modules/cloud/vmware/vmware_dvswitch.py
|
Python
|
gpl-3.0
| 7,199
| 0.001945
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a distributed vSwitch
description:
- Create or remove a distributed vSwitch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the dvSwitch
required: True
switch_name:
description:
- The name of the switch to create or remove
required: True
mtu:
description:
- The switch maximum transmission unit
required: True
uplink_quantity:
description:
- Quantity of uplink per ESXi host added to the switch
required: True
discovery_proto:
description:
            - Link discovery protocol, either Cisco Discovery Protocol (CDP) or Link Layer Discovery Protocol (LLDP)
        choices:
            - 'cdp'
            - 'lldp'
required: True
discovery_operation:
description:
- Select the discovery operation
choices:
- 'both'
- 'none'
- 'advertise'
- 'listen'
state:
description:
- Create or remove dvSwitch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvswitch
local_action:
module: vmware_dvswitch
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
datacenter_name: datacenter
switch_name: dvSwitch
mtu: 9000
uplink_quantity: 2
discovery_proto: lldp
discovery_operation: both
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI,
connect_to_api,
find_datacenter_by_name,
find_dvs_by_name,
vmware_argument_spec,
wait_for_task
)
class VMwareDVSwitch(object):
def __init__(self, module):
self.module = module
self.dvs = None
self.switch_name = self.module.params['switch_name']
self.datacenter_name = self.module.params['datacenter_name']
self.mtu = self.module.params['mtu']
self.uplink_quantity = self.module.params['uplink_quantity']
self.discovery_proto = self.module.params['discovery_proto']
self.discovery_operation = self.module.params['discovery_operation']
self.state = self.module.params['state']
self.content = connect_to_api(module)
def process_state(self):
try:
dvs_states = {
'absent': {
'present': self.state_destroy_dvs,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvs,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvs,
}
}
dvs_states[self.state][self.check_dvs_configuration()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def create_dvswitch(self, network_folder):
result = None
changed = False
spec = vim.DistributedVirtualSwitch.CreateSpec()
spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
spec.configSpec.name = self.switch_name
spec.configSpec.maxMtu = self.mtu
spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
spec.productInfo = vim.dvs.ProductSpec()
spec.productInfo.name = "DVS"
spec.productInfo.vendor = "VMware"
for count in range(1, self.uplink_quantity+1):
spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)
task = network_folder.CreateDVS_Task(spec)
changed, result = wait_for_task(task)
return changed, result
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_destroy_dvs(self):
task = self.dvs.Destroy_Task()
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=str(result))
def state_update_dvs(self):
self.module.exit_json(changed=False, msg="Currently not implemented.")
def state_create_dvs(self):
changed = True
result = None
if not self.module.check_mode:
dc = find_datacenter_by_name(self.content, self.datacenter_name)
changed, result = self.create_dvswitch(dc.networkFolder)
self.module.exit_json(changed=changed, result=str(result))
def check_dvs_configuration(self):
self.dvs = find_dvs_by_name(self.content, self.switch_name)
if self.dvs is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
mtu=dict(required=True, type='int'),
uplink_quantity=dict(required=True, type='int'),
discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_dvswitch = VMwareDVSwitch(module)
vmware_dvswitch.process_state()
if __name__ == '__main__':
main()
|
Fillll/reddit2telegram
|
reddit2telegram/channels/~inactive/r_bapcsaleseurope/app.py
|
Python
|
mit
| 153
| 0.006536
|
#encoding:utf-8
subreddit = 'BaPCSalesEurope'
t_channel = '@r_BaPCSalesEurope'
def send_post(submission, r2t):
    return r2t.send_simple(submission)
|