| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
pombreda/https-gitorious.org-appstream-software-center
|
softwarecenter/backend/channel.py
|
1
|
12578
|
# Copyright (C) 2010 Canonical
#
# Authors:
# Gary Lasker
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import xapian
from gettext import gettext as _
from softwarecenter.distro import get_distro
from softwarecenter.enums import (SortMethods,
Icons,
ViewPages,
)
LOG = logging.getLogger(__name__)
class ChannelsManager(object):
def __init__(self, db, **kwargs):
self.distro = get_distro()
self.db = db
# public
@property
def channels(self):
return self._get_channels_from_db()
@property
def channels_installed_only(self):
return self._get_channels_from_db(True)
@classmethod
def channel_available(kls, channelname):
pass
# private
def _get_channels_from_db(self, installed_only=False):
"""
(internal) implements 'channels()' and 'channels_installed_only()'
properties
"""
distro_channel_origin = self.distro.get_distro_channel_name()
# gather the set of software channels and order them
other_channel_list = []
cached_origins = []
for channel_iter in self.db.xapiandb.allterms("XOL"):
if len(channel_iter.term) == 3:
continue
channel_name = channel_iter.term[3:]
channel_origin = ""
# get origin information for this channel
m = self.db.xapiandb.postlist_begin(channel_iter.term)
doc = self.db.xapiandb.get_document(m.get_docid())
for term_iter in doc.termlist():
if (term_iter.term.startswith("XOO") and
len(term_iter.term) > 3):
channel_origin = term_iter.term[3:]
break
LOG.debug("channel_name: %s" % channel_name)
LOG.debug("channel_origin: %s" % channel_origin)
if channel_origin not in cached_origins:
other_channel_list.append((channel_name, channel_origin))
cached_origins.append(channel_origin)
dist_channel = None
other_channels = []
unknown_channel = []
local_channel = None
for (channel_name, channel_origin) in other_channel_list:
if not channel_name:
unknown_channel.append(SoftwareChannel(
channel_name,
channel_origin,
None,
installed_only=installed_only))
elif channel_origin == distro_channel_origin:
dist_channel = (SoftwareChannel(
channel_name,
channel_origin,
None,
installed_only=installed_only))
elif channel_name == "notdownloadable":
if installed_only:
local_channel = SoftwareChannel(
channel_name,
None,
None,
installed_only=installed_only)
else:
other_channels.append(SoftwareChannel(
channel_name,
channel_origin,
None,
installed_only=installed_only))
# set them in order
channels = []
if dist_channel is not None:
channels.append(dist_channel)
channels.extend(other_channels)
channels.extend(unknown_channel)
if local_channel is not None:
channels.append(local_channel)
for channel in channels:
if installed_only:
channel._channel_view_id = ViewPages.INSTALLED
else:
channel._channel_view_id = ViewPages.AVAILABLE
return channels
class SoftwareChannel(object):
"""
class to represent a software channel
"""
ICON_SIZE = 24
def __init__(self, channel_name, channel_origin, channel_component,
source_entry=None, installed_only=False,
channel_icon=None, channel_query=None,
channel_sort_mode=SortMethods.BY_ALPHABET):
"""
configure the software channel object based on channel name,
origin, and component (the latter for detecting the partner
channel)
"""
self._channel_name = channel_name
self._channel_origin = channel_origin
self._channel_component = channel_component
self._channel_color = None
self._channel_view_id = None
self.installed_only = installed_only
self._channel_sort_mode = channel_sort_mode
# distro specific stuff
self.distro = get_distro()
# configure the channel
self._channel_display_name = self._get_display_name_for_channel(
channel_name, channel_origin, channel_component)
if channel_icon is None:
self._channel_icon = self._get_icon_for_channel(
channel_name, channel_origin, channel_component)
else:
self._channel_icon = channel_icon
if channel_query is None:
self._channel_query = self._get_channel_query_for_channel(
channel_name, channel_origin, channel_component)
else:
self._channel_query = channel_query
# a sources.list entry attached to the channel (this is currently
# only used for not-yet-enabled channels)
self._source_entry = source_entry
# when the channel needs to be added to the system's sources.list
self.needs_adding = False
@property
def name(self):
"""
return the channel name as represented in the xapian database
"""
return self._channel_name
@property
def origin(self):
"""
return the channel origin as represented in the xapian database
"""
return self._channel_origin
@property
def component(self):
"""
return the channel component as represented in the xapian database
"""
return self._channel_component
@property
def display_name(self):
"""
return the display name for the corresponding channel for use in the UI
"""
return self._channel_display_name
@property
def icon(self):
"""
return the icon that corresponds to each channel based
on the channel name, its origin string or its component
"""
return self._channel_icon
@property
def query(self):
"""
return the xapian query to be used with this software channel
"""
return self._channel_query
@property
def sort_mode(self):
"""
return the sort mode for this software channel
"""
return self._channel_sort_mode
# TODO: implement __cmp__ so that sort for channels is encapsulated
# here as well
def _get_display_name_for_channel(self, channel_name, channel_origin,
channel_component):
if channel_component == "partner":
channel_display_name = _("Canonical Partners")
elif not channel_origin:
channel_display_name = _("Unknown")
elif channel_origin == self.distro.get_distro_channel_name():
channel_display_name = self.distro.get_distro_channel_description()
elif channel_name == "For Purchase":
channel_display_name = _("For Purchase")
elif channel_name == "Application Review Board PPA":
channel_display_name = _("Independent")
elif channel_name == "notdownloadable":
channel_display_name = _("Other")
else:
return channel_name
return channel_display_name
def _get_icon_for_channel(self, channel_name, channel_origin,
channel_component):
if channel_component == "partner":
channel_icon = "partner"
elif not channel_name:
channel_icon = "unknown-channel"
elif channel_origin == self.distro.get_distro_channel_name():
channel_icon = "distributor-logo"
elif channel_name == "Application Review Board PPA":
channel_icon = "system-users"
elif channel_name == "For Purchase":
channel_icon = "emblem-money"
elif channel_origin and channel_origin.startswith("LP-PPA"):
channel_icon = "ppa"
elif channel_name == "notdownloadable":
channel_icon = "application-default-icon"
# TODO: add check for generic repository source (e.g., Google, Inc.)
# channel_icon = "generic-repository"
else:
channel_icon = "unknown-channel"
return channel_icon
def _get_channel_query_for_channel(self, channel_name, channel_origin,
channel_component):
if channel_component == "partner":
q1 = xapian.Query("XOCpartner")
q2 = xapian.Query("AH%s-partner" % self.distro.get_codename())
channel_query = xapian.Query(xapian.Query.OP_OR, q1, q2)
# show only apps when displaying the new apps archive
elif channel_name == "Application Review Board PPA":
channel_query = xapian.Query(xapian.Query.OP_AND,
xapian.Query("XOL" + channel_name),
xapian.Query("ATapplication"))
elif channel_origin:
channel_query = xapian.Query("XOO" + channel_origin)
else:
channel_query = xapian.Query("XOL" + channel_name)
return channel_query
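# Illustrative note (not part of the original module): the query prefixes used
# above follow the index layout seen in _get_channels_from_db(): "XOL" terms
# carry the channel label and "XOO" terms carry the archive origin. A
# hypothetical PPA origin would therefore be matched with:
#   xapian.Query("XOO" + "LP-PPA-someuser")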
def __str__(self):
details = []
details.append("* SoftwareChannel")
details.append(" name: %s" % self.name)
details.append(" origin: %s" % self.origin)
details.append(" component: %s" % self.component)
details.append(" display_name: %s" % self.display_name)
details.append(" iconname: %s" % self.icon)
details.append(" query: %s" % self.query)
details.append(" sort_mode: %s" % self.sort_mode)
details.append(" installed_only: %s" % self.installed_only)
return unicode('\n'.join(details), 'utf8').encode('utf8')
class AllChannel(SoftwareChannel):
def __init__(self, channel_name, installed_only):
SoftwareChannel.__init__(
self, channel_name, "all", None,
installed_only=installed_only,
channel_icon=Icons.FALLBACK)
# overrides
def _get_display_name_for_channel(self, channel_name, channel_origin,
channel_component):
return channel_name
def _get_channel_query_for_channel(self, *args):
pass
class AllAvailableChannel(AllChannel):
def __init__(self):
AllChannel.__init__(self, _("All Software"), False)
class AllInstalledChannel(AllChannel):
def __init__(self):
AllChannel.__init__(self, _("All Installed"), True)
# singleton
channels_manager = None
def get_channels_manager(db):
global channels_manager
if channels_manager is None:
from softwarecenter.enums import USE_PACKAGEKIT_BACKEND
if not USE_PACKAGEKIT_BACKEND:
from softwarecenter.backend.channel_impl.aptchannels import (
AptChannelsManager)
channels_manager = AptChannelsManager(db)
else:
channels_manager = ChannelsManager(db)
return channels_manager
def is_channel_available(channelname):
from softwarecenter.backend.channel_impl.aptchannels import (
AptChannelsManager)
return AptChannelsManager.channel_available(channelname)
if __name__ == "__main__":
distro = get_distro()
channel = SoftwareChannel(distro.get_distro_channel_name(),
None, None)
print(channel)
channel = SoftwareChannel(distro.get_distro_channel_name(), None,
"partner")
print(channel)
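# Usage sketch (assumption, not part of the original module): given an
# already-open software-center xapian database object `db`, the available
# channels could be enumerated roughly like this:
#   manager = get_channels_manager(db)
#   for ch in manager.channels:
#       print("%s (origin: %s)" % (ch.display_name, ch.origin))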
|
gpl-3.0
| 6,576,376,662,612,633,000
| 33.938889
| 79
| 0.591827
| false
| 4.356772
| false
| false
| false
|
mteule/StationMeteo
|
messy-doc/StationMeteo.Diagram/StationMeteo.Diagram.py
|
1
|
1485
|
import logging
import serial
import sqlalchemy
import sqlalchemy.orm

class Sensor:
'''
https://www.google.fr/#q=NVARCHAR+encodage+mysql
https://stackoverflow.com/questions/612430/when-must-we-use-nvarchar-nchar-instead-of-varchar-char-in-sql-servers
NVARCHAR is only relevant for MS-SQL users. '''
def __init__(self) :
pass
class Station :
'''(NULL)'''
def __init__(self) :
self.logger = logging.getLogger(__name__) #
self.ser = serial.Serial() #
self.datab = DatabManager() #
self.raw_received_meterings = "" # str
self.metering_quantity = 0 # int
self.last_meterings_list = list() #
self.sensor_dict = dict() # expected keys: 'id', 'name'
pass
def _get_meterings_raw_data (self) :
# returns
pass
def _parse_raw_data (self) :
# returns
pass
def _store_meterings (self) :
# returns
pass
def setup (self) :
# returns
pass
def loop (self) :
# returns
pass
class DatabManager :
'''
http://docs.sqlalchemy.org/en/rel_0_8/orm/tutorial.html#adding-new-objects'''
def __init__(self) :
self.logger = logging.getLogger(__name__) #
self.engine_url = 'sqlite:///:memory:' # str
self.engine = sqlalchemy.create_engine(self.engine_url, echo=True) #
self.Session = sqlalchemy.orm.sessionmaker(bind=self.engine) #
self.session = self.Session() #
pass
class Metering :
'''
http://docs.sqlalchemy.org/en/rel_0_8/orm/tutorial.html#declare-a-mapping
>>> from sqlalchemy.ext.declarative import declarative_base
>>> declarative_base()
<class 'sqlalchemy.ext.declarative.Base'>
>>>
'''
def __init__(self) :
pass
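# Illustrative sketch of the declarative mapping referenced in the Metering
# docstring above (table and column names are placeholders, not part of the
# original diagram):
#   from sqlalchemy import Column, Integer, Float
#   from sqlalchemy.ext.declarative import declarative_base
#   Base = declarative_base()
#   class MeteringRecord(Base):
#       __tablename__ = 'metering'
#       id = Column(Integer, primary_key=True)
#       sensor_id = Column(Integer)
#       value = Column(Float)
#   Base.metadata.create_all(sqlalchemy.create_engine('sqlite:///:memory:'))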
|
mit
| -8,164,009,308,065,374,000
| 26
| 113
| 0.665993
| false
| 2.855769
| false
| false
| false
|
ppwwyyxx/tensorflow
|
tensorflow/python/ops/array_ops.py
|
1
|
192961
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
# LINT.IfChange
matrix_diag_v3_forward_compat_date = (2019, 12, 6)
# LINT.ThenChange(
# //tensorflow/compiler/tests/matrix_diag_ops_test.py,
# //tensorflow/python/kernel_tests/diag_op_test.py,
# //tensorflow/python/ops/parallel_for/array_test.py
# )
@tf_export("reshape", v1=["reshape", "manip.reshape"])
def reshape(tensor, shape, name=None): # pylint: disable=redefined-outer-name
r"""Reshapes a tensor.
Given `tensor`, this operation returns a new `tf.Tensor` that has the same
values as `tensor` in the same order, except with a new shape given by
`shape`.
>>> t1 = [[1, 2, 3],
... [4, 5, 6]]
>>> print(tf.shape(t1).numpy())
[2 3]
>>> t2 = tf.reshape(t1, [6])
>>> t2
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
>>> tf.reshape(t2, [3, 2])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
The `tf.reshape` does not change the order of or the total number of elements
in the tensor, and so it can reuse the underlying data buffer. This makes it
a fast operation independent of how big of a tensor it is operating on.
>>> tf.reshape([1, 2, 3], [2, 2])
Traceback (most recent call last):
...
InvalidArgumentError: Input to reshape is a tensor with 3 values, but the
requested shape has 4
To instead reorder the data to rearrange the dimensions of a tensor, see
`tf.transpose`.
>>> t = [[1, 2, 3],
... [4, 5, 6]]
>>> tf.reshape(t, [3, 2]).numpy()
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)
>>> tf.transpose(t, perm=[1, 0]).numpy()
array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total size remains constant. In particular,
a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can
be -1.
>>> t = [[1, 2, 3],
... [4, 5, 6]]
>>> tf.reshape(t, [-1])
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
>>> tf.reshape(t, [3, -1])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
>>> tf.reshape(t, [-1, 2])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
`tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.
>>> tf.reshape([7], []).numpy()
7
More examples:
>>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(tf.shape(t).numpy())
[9]
>>> tf.reshape(t, [3, 3])
<tf.Tensor: shape=(3, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=int32)>
>>> t = [[[1, 1], [2, 2]],
... [[3, 3], [4, 4]]]
>>> print(tf.shape(t).numpy())
[2 2 2]
>>> tf.reshape(t, [2, 4])
<tf.Tensor: shape=(2, 4), dtype=int32, numpy=
array([[1, 1, 2, 2],
[3, 3, 4, 4]], dtype=int32)>
>>> t = [[[1, 1, 1],
... [2, 2, 2]],
... [[3, 3, 3],
... [4, 4, 4]],
... [[5, 5, 5],
... [6, 6, 6]]]
>>> print(tf.shape(t).numpy())
[3 2 3]
>>> # Pass '[-1]' to flatten 't'.
>>> tf.reshape(t, [-1])
<tf.Tensor: shape=(18,), dtype=int32,
numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
dtype=int32)>
>>> # -- Using -1 to infer the shape --
>>> # Here -1 is inferred to be 9:
>>> tf.reshape(t, [2, -1])
<tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
>>> # -1 is inferred to be 2:
>>> tf.reshape(t, [-1, 9])
<tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
>>> # -1 is inferred to be 3:
>>> tf.reshape(t, [ 2, -1, 3])
<tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=
array([[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]], dtype=int32)>
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
name: Optional string. A name for the operation.
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
result = gen_array_ops.reshape(tensor, shape, name)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("fill")
def fill(dims, value, name=None):
r"""Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
[9, 9, 9]]
```
`tf.fill` differs from `tf.constant` in a few ways:
* `tf.fill` only supports scalar contents, whereas `tf.constant` supports
Tensor values.
* `tf.fill` creates an Op in the computation graph that constructs the
actual
Tensor value at runtime. This is in contrast to `tf.constant` which embeds
the entire Tensor into the graph with a `Const` node.
* Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
based on other runtime Tensors, unlike `tf.constant`.
Args:
dims: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
Represents the shape of the output tensor.
value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy) Equivalent to np.full @end_compatibility
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
result = gen_array_ops.fill(dims, value, name=name)
tensor_util.maybe_set_static_shape(result, dims)
return result
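# Example (illustrative, not part of the upstream docstring): because the fill
# op builds its output at graph runtime, the shape may itself come from another
# tensor, which `tf.constant` cannot do.
#   >>> rows = tf.shape(tf.zeros([4, 2]))[0]   # dynamic dimension (here 4)
#   >>> tf.fill([rows, 3], 9.0).shape
#   TensorShape([4, 3])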
@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
For example:
```python
import tensorflow as tf
val0 = tf.ones((1,), dtype=tf.float32)
a = tf.atan2(val0, val0)
a_identity = tf.identity(a)
print(a.numpy()) #[0.7853982]
print(a_identity.numpy()) #[0.7853982]
```
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(input, composite_tensor.CompositeTensor):
return nest.map_structure(identity, input, expand_composites=True)
if context.executing_eagerly() and not hasattr(input, "graph"):
# Make sure we get an input with handle data attached from resource
# variables. Variables have correct handle data when graph building.
input = ops.convert_to_tensor(input)
ret = gen_array_ops.identity(input, name=name)
# Propagate handle data for happier shape inference for resource variables.
if hasattr(input, "_handle_data"):
ret._handle_data = input._handle_data # pylint: disable=protected-access
return ret
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if either both or neither of `dim` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("Must specify an axis argument to tf.expand_dims()")
return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
"""Returns a tensor with an additional dimension inserted at index `axis`.
Given a tensor `input`, this operation inserts a dimension of size 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of one image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Examples:
>>> t = [[1, 2, 3],[4, 5, 6]] # shape [2, 3]
>>> tf.expand_dims(t, 0)
<tf.Tensor: shape=(1, 2, 3), dtype=int32, numpy=
array([[[1, 2, 3],
[4, 5, 6]]], dtype=int32)>
>>> tf.expand_dims(t, 1)
<tf.Tensor: shape=(2, 1, 3), dtype=int32, numpy=
array([[[1, 2, 3]],
[[4, 5, 6]]], dtype=int32)>
>>> tf.expand_dims(t, 2)
<tf.Tensor: shape=(2, 3, 1), dtype=int32, numpy=
array([[[1],
[2],
[3]],
[[4],
[5],
[6]]], dtype=int32)>
>>> tf.expand_dims(t, -1) # Last dimension index. In this case, same as 2.
<tf.Tensor: shape=(2, 3, 1), dtype=int32, numpy=
array([[[1],
[2],
[3]],
[[4],
[5],
[6]]], dtype=int32)>
This operation is related to:
* `tf.squeeze`, which removes dimensions of size 1.
* `tf.reshape`, which provides more flexible reshaping capability
Args:
input: A `Tensor`.
axis: Integer specifying the dimension index at which to expand the
shape of `input`. Given an input of D dimensions, `axis` must be in range
`[-(D+1), D]` (inclusive).
name: Optional string. The name of the output `Tensor`.
Returns:
A tensor with the same data as `input`, with an additional dimension
inserted at the index specified by `axis`.
Raises:
ValueError: If `axis` is not specified.
InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.
"""
return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops.list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given symbolic shapes.
When shape_x and shape_y are Tensors representing shapes (i.e. the result of
calling tf.shape on another Tensor) this computes a Tensor which is the shape
of the result of a broadcasting op applied in tensors of shapes shape_x and
shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
Tensor whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors do not have statically known shapes.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y)
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given known shapes.
When shape_x and shape_y are fully known TensorShapes this computes a
TensorShape which is the shape of the result of a broadcasting op applied in
tensors of shapes shape_x and shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
TensorShape whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors have statically known shapes.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
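# Example (illustrative, not part of the upstream docstring), matching the
# shapes described above:
#   >>> tf.broadcast_static_shape(tf.TensorShape([1, 2, 3]),
#   ...                           tf.TensorShape([5, 1, 3]))
#   TensorShape([5, 2, 3])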
@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
>>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
>>> tf.shape(t)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>
>>> tf.shape(t).numpy()
array([2, 2, 3], dtype=int32)
Note: When using symbolic tensors, such as when using the Keras functional
API, tf.shape() will return the shape of the symbolic tensor.
>>> a = tf.keras.layers.Input((None, 10))
>>> tf.shape(a)
<tf.Tensor ... shape=(3,) dtype=int32>
In these cases, using `tf.Tensor.shape` will return more informative results.
>>> a.shape
TensorShape([None, None, 10])
Args:
input: A `Tensor` or `SparseTensor`.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
return shape(input, name, out_type)
@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
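# Illustrative note (not part of the upstream file): with optimize=True and a
# fully defined static shape, the helper above returns a constant instead of
# emitting a Shape op, e.g. shape_internal(tf.zeros([2, 3])) evaluates to a
# constant tensor [2, 3]; only unknown shapes fall through to
# gen_array_ops.shape().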
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: The specified output type of the operation (`int32` or `int64`).
Defaults to `tf.int32` (optional).
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
return gen_array_ops.shape_n(input, out_type=out_type, name=name)
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size(input, name, out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type of the
operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if (context.executing_eagerly() and not hasattr(input, "graph") and
not isinstance(
input,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):
input = ops.convert_to_tensor(input)
np_out_type = out_type.as_numpy_dtype
num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access
return ops.convert_to_tensor(num_elements, dtype=out_type)
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_math_ops.prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
_SLICE_TYPE_ERROR = (
"Only integers, slices (`:`), ellipsis (`...`), "
"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
"indices")
_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
dtypes.int64_ref)
def _check_index(idx):
"""Check if a given value is a valid index into a tensor."""
if isinstance(idx, (six.integer_types, tensor_shape.Dimension)):
return
# Optimistic check. Assumptions:
# * any object with a dtype is supported
# * any object with a dtype has a sizeable shape attribute.
dtype = getattr(idx, "dtype", None)
if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
idx.shape and len(idx.shape) == 1):
# TODO(slebedev): IndexError seems more appropriate here, but it
# will break `_slice_helper` contract.
raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _is_undefined_dimension(d):
return isinstance(d, tensor_shape.Dimension) and d.value is None
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
it currently only supports basic indexing. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# Strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# Skip every other row and reverse the order of the columns
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
# Masks
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable object to slice
(i.e. tensor is the read-only view of this variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, ellipsis,
tf.newaxis or scalar int32/int64 tensors.
"""
if isinstance(slice_spec, bool) or \
(isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
(isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
return boolean_mask(tensor=tensor, mask=slice_spec)
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _BaseSlice):
if s.start is not None and not _is_undefined_dimension(s.start):
_check_index(s.start)
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and not _is_undefined_dimension(s.stop):
_check_index(s.stop)
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None and not _is_undefined_dimension(s.step):
_check_index(s.step)
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
_check_index(s)
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
# stack possibly involves no tensors, so we must use op_scope correct graph.
with ops.name_scope(
None,
"strided_slice", [tensor] + begin + end + strides,
skip_on_eager=False) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input_` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input_` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input_`. In other
words, `begin[i]` is the offset into the i'th dimension of `input_` that you
want to slice from.
Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input_.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input_`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1, taking on the value at index
`begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
equal to 2.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
if not (var is None and isinstance(op, ops.EagerTensor)):
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
else:
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
op.assign = assign
return op
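# Illustrative mapping (not part of the upstream file) from __getitem__ syntax
# to the mask arguments documented above, assuming `foo` has rank 3:
#   foo[:, 3, :]   ->  begin=[0, 3, 0], end=[0, 4, 0], strides=[1, 1, 1],
#                      begin_mask=0b101, end_mask=0b101, shrink_axis_mask=0b010
#   foo[1:, tf.newaxis]  ->  new_axis_mask has bit 1 set and the remaining
#                            dimensions are covered implicitly.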
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
As an operator. The operator also has an `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice,
ellipsis, tf.newaxis or int32/int64 tensors.
"""
return _slice_helper(var.value(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
available; in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops.parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
>>> x = tf.constant([1, 4])
>>> y = tf.constant([2, 5])
>>> z = tf.constant([3, 6])
>>> tf.stack([x, y, z])
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)>
>>> tf.stack([x, y, z], axis=1)
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6]], dtype=int32)>
This is the opposite of unstack. The numpy equivalent is `np.stack`
>>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))
True
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access
if value_shape is not None:
expanded_num_dims = len(value_shape) + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -expanded_num_dims, expanded_num_dims))
return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
if context.executing_eagerly():
# NOTE: Fast path when all the items are tensors, this doesn't do any type
# checking.
if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
return gen_array_ops.pack(list_or_tuple, name=name)
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" %
(elem.dtype, dtype, elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops.pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be converted
to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _cast_nested_seqs_to_dtype(dtype):
def _maybe_cast(elem):
if ops.is_dense_tensor_like(elem):
if dtype != elem.dtype.base_dtype:
elem = gen_math_ops.cast(elem, dtype)
return elem
return _maybe_cast
_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)
def _should_not_autopack(v):
# The condition we really want is
# ops.is_dense_tensor_like(...)
# but it is >5x slower due to abc.ABCMeta.__instancecheck__.
# pylint: disable=unidiomatic-typecheck
# TODO(slebedev): add nest.all?
return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))
# pylint: enable=unidiomatic-typecheck
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref or _should_not_autopack(v):
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is None:
dtype = inferred_dtype
elif dtype != inferred_dtype:
v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
return _autopacking_helper(v, dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack.
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred if
`None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape.dims[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
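# Illustrative sketch (added for exposition, not part of the original module):
# a tiny helper, never called at import time, showing that `unstack` inverts
# `stack` along the same axis. The helper name `_example_unstack_roundtrip` is
# hypothetical; `constant` and `stack` are module-level names this file
# already uses elsewhere.
def _example_unstack_roundtrip():
  t = constant([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
  rows = unstack(t, axis=0)             # two tensors, each of shape (3,)
  return stack(rows, axis=0)            # a (2, 3) tensor equal to `t`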
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
>>> t1 = [[1, 2, 3], [4, 5, 6]]
>>> t2 = [[7, 8, 9], [10, 11, 12]]
>>> concat([t1, t2], 0)
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> concat([t1, t2], 1)
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 7, 8, 9],
[ 4, 5, 6, 10, 11, 12]], dtype=int32)>
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
>>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
>>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
>>> tf.concat([t1, t2], -1)
<tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=
array([[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]], dtype=int32)>
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing for
axis is 0-based. Positive axis in the range of `[0, rank(values))` refers
to `axis`-th dimension. And negative axis refers to `axis +
rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_has_rank(0)
return identity(values[0], name=name)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
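# Illustrative sketch (exposition only, never called, hypothetical helper
# name): concatenating two (2, 3) tensors along an existing axis versus
# stacking them along a new axis, using the module-level `constant` and
# `stack` names already used in this file.
def _example_concat_vs_stack():
  t1 = constant([[1, 2, 3], [4, 5, 6]])
  t2 = constant([[7, 8, 9], [10, 11, 12]])
  joined = concat([t1, t2], axis=0)   # shape (4, 3): existing axis grows
  piled = stack([t1, t2], axis=0)     # shape (2, 2, 3): a new axis is added
  return joined, piled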
@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where_v2(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(
tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
[first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
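# Illustrative sketch (exposition only, never called, hypothetical helper
# name): masking rows with the default axis, and masking columns via `axis=1`.
def _example_boolean_mask_axis():
  t = constant([[1, 2], [3, 4], [5, 6]])                         # shape (3, 2)
  kept_rows = boolean_mask(t, constant([True, False, True]))     # [[1, 2], [5, 6]]
  kept_cols = boolean_mask(t, constant([False, True]), axis=1)   # [[2], [4], [6]]
  return kept_rows, kept_cols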
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
return boolean_mask(tensor, mask, name, axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once the API deprecation
# period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switching to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once the API deprecation
# period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switching to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
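# Illustrative sketch (exposition only, never called, hypothetical helper
# name): `unique_with_counts` returns the distinct values, the index of each
# input element into those values, and how often each value occurs.
def _example_unique_with_counts():
  x = constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
  y, idx, count = unique_with_counts(x)
  # y     -> [1, 2, 4, 7, 8]
  # idx   -> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  # count -> [2, 1, 3, 1, 2]
  return y, idx, count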
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer, then `value` is split along dimension
`axis` into `num_split` smaller tensors. This requires that `num_split` evenly
divides `value.shape[axis]`.
If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
element has the same size as the `value` except along dimension `axis` where
the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either an integer indicating the number of splits along
`axis` or a 1-D integer `Tensor` or Python list containing the sizes of
each output tensor along `axis`. If a scalar, then it must evenly divide
`value.shape[axis]`; otherwise the sum of sizes along the split axis
must match that of the `value`.
axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if isinstance(num_or_size_splits,
six.integer_types + (tensor_shape.Dimension,)):
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if size_splits._rank() == 0:
raise ValueError(
"Rank-0 tensors are not supported as the num_or_size_splits argument "
"to split. Argument provided: %s" % (num_or_size_splits,))
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
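# Illustrative sketch (exposition only, never called, hypothetical helper
# name): splitting a (5, 30) tensor along axis 1 into explicit sizes; the
# sizes must sum to the length of the split dimension. It uses this module's
# `zeros`, defined later in the file, which is fine because the helper is
# never executed at import time.
def _example_split_by_sizes():
  value = zeros([5, 30])
  s0, s1, s2 = split(value, [4, 15, 11], axis=1)
  # s0.shape -> (5, 4), s1.shape -> (5, 15), s2.shape -> (5, 11)
  return s0, s1, s2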
@tf_export("transpose", v1=[])
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
@tf_export(v1=["transpose"])
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if not tensor_util.is_tensor(a):
a = ops.convert_to_tensor(a, name="a")
if conjugate and a.dtype.is_complex:
transpose_fn = gen_array_ops.conjugate_transpose
else:
transpose_fn = gen_array_ops.transpose
if perm is not None:
return transpose_fn(a, perm, name=name)
rank = a.shape.rank
if rank is None:
perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)
else:
perm = np.arange(rank - 1, -1, -1, dtype=np.int32)
return transpose_fn(a, perm, name=name)
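# Illustrative sketch (exposition only, never called, hypothetical helper
# name): with `perm` omitted, a rank-2 tensor gets the ordinary matrix
# transpose, i.e. `perm` defaults to (1, 0).
def _example_transpose_default_perm():
  x = constant([[1, 2, 3], [4, 5, 6]])   # shape (2, 3)
  return transpose(x)                    # shape (3, 2): [[1, 4], [2, 5], [3, 6]]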
# pylint: disable=invalid-name
@tf_export(
"linalg.matrix_transpose",
v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.linalg.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.linalg.matrix_transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, `linalg.matrix_transpose` returns a new
tensor with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.linalg.matrix_transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat(
(gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
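# Illustrative sketch (exposition only, never called, hypothetical helper
# name): only the last two dimensions are swapped; leading batch dimensions
# are preserved.
def _example_matrix_transpose_batch():
  x = zeros([2, 5, 3, 4])        # two batch dimensions, matrices of shape (3, 4)
  return matrix_transpose(x)     # shape (2, 5, 4, 3)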
@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
name="diag",
k=0,
num_rows=-1,
num_cols=-1,
padding_value=0,
align="RIGHT_LEFT"):
"""Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
diagonals of a matrix, with everything else padded with `padding`. `num_rows`
and `num_cols` specify the dimension of the innermost matrix of the output. If
both are not specified, the op assumes the innermost matrix is square and
infers its size from `k` and the innermost dimension of `diagonal`. If only
one of them is specified, the op assumes the unspecified value is the smallest
possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor
has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only
one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has
rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is
scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and
the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
padding_value ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the
same batch (`M = k[1]-k[0]+1`), and the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
padding_value ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and
`index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
```
# The main diagonal.
diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]],
[[5, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 7, 0],
[0, 0, 0, 8]]]
# A superdiagonal (per batch).
diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
[0, 0, 2, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]],
[[0, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 6],
[0, 0, 0, 0]]]
# A tridiagonal band (per batch).
diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)
[1, 2, 3],
[0, 4, 5]],
[[2, 3, 0],
[6, 7, 9],
[0, 9, 1]]])
tf.matrix_diag(diagonals, k = (-1, 1))
==> [[[1, 8, 0], # Output shape: (2, 3, 3)
[4, 2, 9],
[0, 5, 3]],
[[6, 2, 0],
[9, 7, 3],
[0, 1, 9]]]
# RIGHT_LEFT alignment.
diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)
[1, 2, 3],
[4, 5, 0]],
[[0, 2, 3],
[6, 7, 9],
[9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 1), align="RIGHT_LEFT")
==> [[[1, 8, 0], # Output shape: (2, 3, 3)
[4, 2, 9],
[0, 5, 3]],
[[6, 2, 0],
[9, 7, 3],
[0, 1, 9]]]
# Rectangular matrix.
diagonal = np.array([1, 2]) # Input shape: (2)
tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4)
[1, 0, 0, 0],
[0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9.
tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2)
[1, 9],
[9, 2]]
```
Args:
diagonal: A `Tensor` with `rank k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns:
A Tensor. Has the same type as `diagonal`.
"""
if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_v3(
diagonal=diagonal,
k=k,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value,
align=align,
name=name)
# Call v1 to maintain forward compatibility.
# (We skip v2 because its alignment conflicts with v3's default alignment.)
return gen_array_ops.matrix_diag(diagonal=diagonal, name=name)
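# Illustrative sketch (exposition only, never called, hypothetical helper
# name): the simplest case, turning a batch of main diagonals into a batch of
# square matrices with zeros everywhere else.
def _example_matrix_diag_main():
  diagonal = constant([[1, 2, 3],
                       [4, 5, 6]])   # a batch of two length-3 diagonals
  return matrix_diag(diagonal)       # shape (2, 3, 3), zero off the diagonal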
@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@deprecation.deprecated_endpoints("matrix_diag_part")
@dispatch.add_dispatch_support
def matrix_diag_part(
input, # pylint:disable=redefined-builtin
name="diag_part",
k=0,
padding_value=0,
align="RIGHT_LEFT"):
"""Returns the batched diagonal part of a batched tensor.
Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
`input`.
Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
Let `max_diag_len` be the maximum length among all diagonals to be extracted,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
Let `num_diags` be the number of diagonals to extract,
`num_diags = k[1] - k[0] + 1`.
If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
`[I, J, ..., L, max_diag_len]` and values:
```
diagonal[i, j, ..., l, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
padding_value ; otherwise.
```
where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
Otherwise, the output tensor has rank `r` with dimensions
`[I, J, ..., L, num_diags, max_diag_len]` with values:
```
diagonal[i, j, ..., l, m, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
padding_value ; otherwise.
```
where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
The input must be at least a matrix.
For example:
```
input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8],
[9, 8, 7, 6]],
[[5, 4, 3, 2],
[1, 2, 3, 4],
[5, 6, 7, 8]]])
# A main diagonal from each batch.
tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
[5, 2, 7]]
# A superdiagonal from each batch.
tf.matrix_diag_part(input, k = 1)
==> [[2, 7, 6], # Output shape: (2, 3)
[4, 3, 8]]
# A band from each batch.
tf.matrix_diag_part(input, k = (-1, 2))
==> [[[3, 8, 0], # Output shape: (2, 4, 3)
[2, 7, 6],
[1, 6, 7],
[0, 5, 8]],
[[3, 4, 0],
[4, 3, 8],
[5, 2, 7],
[0, 1, 6]]]
# RIGHT_LEFT alignment.
tf.matrix_diag_part(input, k = (-1, 2), align="RIGHT_LEFT")
==> [[[0, 3, 8], # Output shape: (2, 4, 3)
[2, 7, 6],
[1, 6, 7],
[5, 8, 0]],
[[0, 3, 4],
[4, 3, 8],
[5, 2, 7],
[1, 6, 0]]]
# max_diag_len can be shorter than the main diagonal.
tf.matrix_diag_part(input, k = (-2, -1))
==> [[[5, 8],
[0, 9]],
[[1, 6],
[0, 5]]]
# padding_value = 9
tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
==> [[[4, 9, 9], # Output shape: (2, 3, 3)
[3, 8, 9],
[2, 7, 6]],
[[2, 9, 9],
[3, 4, 9],
[4, 3, 8]]]
```
Args:
input: A `Tensor` with `rank k >= 2`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns:
A Tensor containing diagonals of `input`. Has the same type as `input`.
"""
if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(input, "dtype") and input.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_part_v3(
input=input, k=k, padding_value=padding_value, align=align, name=name)
# Call v1 to maintain forward compatibility.
# (We skip v2 because its alignment conflicts with v3's default alignment.)
return gen_array_ops.matrix_diag_part(input=input, name=name)
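# Illustrative sketch (exposition only, never called, hypothetical helper
# name): extracting the main diagonal (k=0) and the first superdiagonal (k=1)
# of a single 3x3 matrix.
def _example_matrix_diag_part_offsets():
  m = constant([[1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]])
  main = matrix_diag_part(m)          # [1, 5, 9]
  upper = matrix_diag_part(m, k=1)    # [2, 6]
  return main, upper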
@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
input, # pylint:disable=redefined-builtin
diagonal,
name="set_diag",
k=0,
align="RIGHT_LEFT"):
"""Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the specified diagonals of the
innermost matrices. These will be overwritten by the values in `diagonal`.
`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
If `k` is scalar or `k[0] == k[1]`:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
input[i, j, ..., l, m, n] ; otherwise
```
Otherwise,
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
input[i, j, ..., l, m, n] ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and
`index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
```
# The main diagonal.
input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]])
diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
[4, 5, 6]])
tf.matrix_set_diag(input, diagonal)
==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
[7, 2, 7, 7],
[7, 7, 3, 7]],
[[4, 7, 7, 7],
[7, 5, 7, 7],
[7, 7, 6, 7]]]
# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
[7, 7, 2, 7],
[7, 7, 7, 3]],
[[7, 4, 7, 7],
[7, 7, 5, 7],
[7, 7, 7, 6]]]
# A band of diagonals.
diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)
[6, 5, 8],
[1, 2, 3],
[0, 4, 5]],
[[1, 2, 0],
[5, 6, 4],
[6, 1, 2],
[0, 3, 4]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 2))
==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
[4, 2, 5, 1],
[7, 5, 3, 8]],
[[6, 5, 1, 7],
[3, 1, 6, 2],
[7, 4, 2, 4]]]
# RIGHT_LEFT alignment.
diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)
[6, 5, 8],
[1, 2, 3],
[4, 5, 0]],
[[0, 1, 2],
[5, 6, 4],
[6, 1, 2],
[3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="RIGHT_LEFT")
==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
[4, 2, 5, 1],
[7, 5, 3, 8]],
[[6, 5, 1, 7],
[3, 1, 6, 2],
[7, 4, 2, 4]]]
```
Args:
input: A `Tensor` with rank `k + 1`, where `k >= 1`.
diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,
otherwise. `k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
"""
if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):
return gen_array_ops.matrix_set_diag_v3(
input=input, diagonal=diagonal, k=k, align=align, name=name)
# Call v1 to maintain forward compatibility.
# (We skip v2 because its alignment conflicts with v3's default alignment.)
return gen_array_ops.matrix_set_diag(
input=input, diagonal=diagonal, name=name)
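# Illustrative sketch (exposition only, never called, hypothetical helper
# name): overwriting the main diagonal of a zero matrix while leaving the
# off-diagonal entries untouched.
def _example_matrix_set_diag_main():
  m = zeros([3, 3])
  return matrix_set_diag(m, constant([1.0, 2.0, 3.0]))   # diag(1, 2, 3)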
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
try:
if np.prod(shape) < 1000:
return constant(value, shape=shape, dtype=dtype, name=name)
except TypeError:
# Happens when shape is a Tensor, list with Tensor elements, etc.
pass
return None
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
>>> tf.zeros([3, 4], tf.int32)
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int32)>
Args:
shape: A `list` of integers, a `tuple` of integers, or
a 1-D `Tensor` of type `int32`.
dtype: The DType of an element in the resulting `Tensor`.
name: Optional string. A name for the operation.
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
if not context.executing_eagerly():
# Create a constant if it won't be very big. Otherwise create a fill
# op to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(zero, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
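# Illustrative sketch (exposition only, never called, hypothetical helper
# name): `shape` may be a 1-D int32 tensor, so `zeros` can mirror another
# tensor's dynamic shape while using a different dtype.
def _example_zeros_from_dynamic_shape():
  t = constant([[1, 2, 3], [4, 5, 6]])
  return zeros(shape(t), dtype=dtypes.int64)   # a (2, 3) tensor of int64 zeros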
@tf_export(v1=["zeros_like"])
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(tensor, dtype, name, optimize)
@tf_export("zeros_like", v1=[])
@dispatch.add_dispatch_support
def zeros_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]] with dtype=int32
# If the dtype of the input `tensor` is `float32`, the output is `float32` too.
tensor = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
tf.zeros_like(tensor)  # [[0., 0., 0.], [0., 0., 0.]] with dtype=float32
# To get a different dtype for the output, pass `dtype` explicitly.
tensor = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
tf.zeros_like(tensor, dtype=tf.int32)  # [[0, 0, 0], [0, 0, 0]] with dtype=int32
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(input, dtype, name, optimize=True)
def zeros_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 zeros_like API calls."""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
if not tensor_util.is_tensor(tensor):
tensor = ops.convert_to_tensor(tensor, name="tensor")
tensor_shape = tensor.shape
tensor_dtype = tensor.dtype
if context.executing_eagerly():
if dtype is not None and dtype != tensor_dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
return gen_array_ops.zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor_shape.is_fully_defined() and
tensor_dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor_shape, dtype=dtype or tensor_dtype, name=name)
if dtype is not None and dtype != tensor_dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops.zeros_like(tensor, name=name)
@tf_export(v1=["ones_like"])
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,
`complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor' and
encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(tensor, dtype, name, optimize)
@tf_export("ones_like", v1=[])
@dispatch.add_dispatch_support
def ones_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to one.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to 1. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to one.
"""
return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 ones_like API calls."""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if not context.executing_eagerly():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to one (1).
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to one.
>>> tf.ones([3, 4], tf.int32)
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype=int32)>
Args:
shape: A `list` of integers, a `tuple` of integers, or
a 1-D `Tensor` of type `int32`.
dtype: Optional DType of an element in the resulting `Tensor`. Default is
`tf.float32`.
name: Optional string. A name for the operation.
Returns:
A `Tensor` with all elements set to one (1).
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
if not context.executing_eagerly():
# Create a constant if it won't be very big. Otherwise create a fill
# op to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(one, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin
"""A placeholder op that passes through `input` when its output is not fed.
Args:
input: A `Tensor`. The default value to produce when output is not fed.
shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of
the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
return gen_array_ops.placeholder_with_default(input, shape, name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.compat.v1.sparse.placeholder(tf.float32)
y = tf.sparse.reduce_sum(x)
with tf.compat.v1.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
  x: tf.compat.v1.SparseTensorValue(indices, values, shape)}))  # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("`tf.sparse.placeholder()` is not compatible with "
                   "eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64,
shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
return pad(tensor, paddings, mode, name, constant_values)
@tf_export(v1=["pad"])
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
# TODO(rjryan): Once the forward compatibility period (3 weeks) has passed,
# remove the "Pad" fallback here.
if not tensor_util.is_tensor(constant_values) and constant_values == 0:
result = gen_array_ops.pad(tensor, paddings, name=name)
else:
result = gen_array_ops.pad_v2(
tensor, paddings, constant_values, name=name)
elif mode == "REFLECT":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if not context.executing_eagerly():
paddings_constant = _get_paddings_constant(paddings)
input_shape = (
tensor_shape.TensorShape(tensor.shape)
if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)
if (input_shape.ndims is not None and
not result.shape.is_fully_defined() and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any((x is None for x in padding)):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
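# Illustrative sketch (exposition only, never called, hypothetical helper
# name): CONSTANT padding with a non-zero fill value; `paddings[D]` gives the
# amount added before and after dimension D.
def _example_pad_constant_value():
  t = constant([[1, 2], [3, 4]])
  paddings = constant([[1, 1], [2, 2]])
  return pad(t, paddings, "CONSTANT", constant_values=9)   # shape (4, 6), 9-filled border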
def _get_paddings_constant(paddings):
"""Helper to get the constant values of the paddings arg to pad().
Used under V1 graph mode to facilitate computation of the shape of the output
tensor of `pad()`.
Args:
paddings: The same paddings arg as passed to pad(). Can be a Tensor, or
a nested list or tuple of Tensor and/or numbers.
Returns:
A nested list or numbers or `None`, in which `None` indicates unknown
padding size.
"""
if isinstance(paddings, ops.Tensor):
return tensor_util.constant_value(paddings, partial=True)
elif isinstance(paddings, (list, tuple)):
return [_get_paddings_constant(x) for x in paddings]
else:
return paddings
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
TypeError: When an invalid (unsupported) keyword argument is passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
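# Illustrative sketch (exposition only, never called, hypothetical helper
# name): with 'ij' indexing the first output varies along axis 0 and the
# second along axis 1, so both grids have shape (len(x), len(y)).
def _example_meshgrid_ij():
  x = constant([1, 2, 3])
  y = constant([4, 5])
  gx, gy = meshgrid(x, y, indexing="ij")
  # gx.shape == gy.shape == (3, 2)
  return gx, gy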
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None  # Sentinel meaning the dimension size cannot be inferred statically.
use_full_range = None  # Sentinel meaning an omitted slice bound spans the full valid range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by the
      length of `truth`.
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
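# A usage sketch matching the docstring example above (illustrative only; the
# helper is hypothetical and never called at import time).
def _edit_distance_example():  # hypothetical demo helper
  hypothesis = sparse_tensor.SparseTensor(
      [[0, 0, 0], [1, 0, 0]], ["a", "b"], (2, 1, 1))
  truth = sparse_tensor.SparseTensor(
      [[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]],
      ["a", "b", "c", "a"], (2, 2, 2))
  # Expected result (normalized by truth lengths): [[inf, 1.0], [0.5, 1.0]].
  return edit_distance(hypothesis, truth, normalize=True)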
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
  Raises:
    ValueError: if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape().dims[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
name="crops")
return result_paddings, result_crops
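# A usage sketch (illustrative only; the helper is hypothetical and never
# called): pads [5, 7] up to the nearest multiples of the block shape [2, 3].
def _required_space_to_batch_paddings_example():  # hypothetical demo helper
  paddings, crops = required_space_to_batch_paddings(
      input_shape=[5, 7], block_shape=[2, 3])
  # With no base_paddings: paddings == [[0, 1], [0, 2]] (5 -> 6, 7 -> 9) and
  # crops == [[0, 1], [0, 2]], recording the extra padding that was added.
  return paddings, crops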
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
paddings,
block_size=None,
name=None,
block_shape=None): # pylint: disable=redefined-builtin
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
def space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin
return space_to_batch_nd(input, block_shape, paddings, name)
space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
def batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring
block_size = deprecation.deprecated_argument_lookup("block_shape",
block_shape, "block_size",
block_size)
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
def batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin
"""BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
shape `block_shape + [batch]`, interleaves these blocks back into the grid
defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
same rank as the input. The spatial dimensions of this intermediate result
are then optionally cropped according to `crops` to produce the output. This
is the reverse of SpaceToBatch (see `tf.space_to_batch`).
Args:
input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +
remaining_shape`, where `spatial_shape` has M dimensions.
block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following
types: `int32`, `int64`. All values must be >= 1. For backwards
compatibility with TF 1.0, this parameter may be an int, in which case it
is converted to
`numpy.array([block_shape, block_shape],
dtype=numpy.int64)`.
crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the
following types: `int32`, `int64`. All values must be >= 0.
`crops[i] = [crop_start, crop_end]` specifies the amount to crop from
input dimension `i + 1`, which corresponds to spatial dimension `i`.
It is required that
`crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,
block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,
input_shape[N-1]]
2. Permute dimensions of `reshaped` to produce `permuted` of shape
[batch / prod(block_shape), input_shape[1], block_shape[0], ...,
input_shape[M], block_shape[M-1], input_shape[M+1],
..., input_shape[N-1]]
3. Reshape `permuted` to produce `reshaped_permuted` of shape
[batch / prod(block_shape), input_shape[1] * block_shape[0], ...,
input_shape[M] * block_shape[M-1], input_shape[M+1], ...,
input_shape[N-1]]
4. Crop the start and end of dimensions `[1, ..., M]` of
`reshaped_permuted` according to `crops` to produce the output
of shape:
[batch / prod(block_shape), input_shape[1] *
block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *
block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1],
..., input_shape[N-1]]
Some Examples:
(1) For the following input of shape `[4, 1, 1, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```python
[[[[1]]],
[[[2]]],
[[[3]]],
[[[4]]]]
```
The output tensor has shape `[1, 2, 2, 1]` and value:
  ```python
  x = [[[[1], [2]],
       [[3], [4]]]]
  ```
(2) For the following input of shape `[4, 1, 1, 3]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```python
[[[1, 2, 3]],
[[4, 5, 6]],
[[7, 8, 9]],
[[10, 11, 12]]]
```
The output tensor has shape `[1, 2, 2, 3]` and value:
```python
x = [[[[1, 2, 3], [4, 5, 6 ]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following
input of shape `[4, 2, 2, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```python
x = [[[[1], [3]], [[ 9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
```python
x = [[[1], [2], [ 3], [ 4]],
[[5], [6], [ 7], [ 8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]
```
(4) For the following input of shape
`[8, 1, 3, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
```python
x = [[[[0], [ 1], [ 3]]],
[[[0], [ 9], [11]]],
[[[0], [ 2], [ 4]]],
[[[0], [10], [12]]],
[[[0], [ 5], [ 7]]],
[[[0], [13], [15]]],
[[[0], [ 6], [ 8]]],
[[[0], [14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
```python
x = [[[[ 1], [ 2], [ 3], [ 4]],
[[ 5], [ 6], [ 7], [ 8]]],
[[[ 9], [10], [11], [12]],
        [[13], [14], [15], [16]]]]
  ```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(block_shape, int):
block_shape = np.array([block_shape, block_shape], dtype=np.int64)
return batch_to_space_nd(
input=input, block_shape=block_shape, crops=crops, name=name)
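# A usage sketch mirroring example (1) in the docstring above (illustrative
# only; the helper is hypothetical and never called at import time).
def _batch_to_space_example():  # hypothetical demo helper
  x = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]  # shape [4, 1, 1, 1]
  out = batch_to_space_v2(x, block_shape=[2, 2], crops=[[0, 0], [0, 0]])
  # out has shape [1, 2, 2, 1] with values [[[[1], [2]], [[3], [4]]]].
  return out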
@tf_export("one_hot")
@dispatch.add_dispatch_support
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.
  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
  If `indices` is a scalar, the output shape will be a vector of length
  `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer
to a non-ragged axis. The output will be equivalent to applying 'one_hot' on
the values of the RaggedTensor, and creating a new RaggedTensor from the
result.
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
indices = tf.ragged.constant([[0, 1], [2]])
depth = 3
tf.one_hot(indices, depth) # output: [2 x None x 3]
# [[[1., 0., 0.],
# [0., 1., 0.]],
# [[0., 0., 1.]]]
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match
      `dtype`.
    TypeError: If the dtypes of `on_value` and `off_value` don't match one
      another.
"""
with ops.name_scope(
name, "one_hot",
[indices, depth, on_value, off_value, axis, dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (
ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists else None)
off_dtype = (
ops.convert_to_tensor(off_value).dtype.base_dtype
if off_exists else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
name)
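# A short usage sketch grounded in the docstring examples above (illustrative
# only; the helper is hypothetical and never called at import time).
def _one_hot_example():  # hypothetical demo helper
  out = one_hot([0, 2, -1, 1], depth=3, on_value=5.0, off_value=0.0, axis=-1)
  # out has shape [4, 3]; the row for index -1 contains only off_value.
  return out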
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative cast type: whenever maxlen fits into tf.int32, so do the
    # lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
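# A usage sketch for `sequence_mask` (illustrative only; the helper is
# hypothetical and never called at import time).
def _sequence_mask_example():  # hypothetical demo helper
  mask = sequence_mask([1, 3, 2], maxlen=5)
  # mask has shape [3, 5]; row i holds lengths[i] leading True values followed
  # by False values.
  return mask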
@tf_export(v1=["squeeze"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
>>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
>>> t = tf.ones([1, 2, 1, 3, 1, 1])
>>> print(tf.shape(tf.squeeze(t)).numpy())
[2 3]
Or, to remove specific size 1 dimensions:
>>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
>>> t = tf.ones([1, 2, 1, 3, 1, 1])
>>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy())
[1 2 3 1]
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "squeeze_dims",
squeeze_dims)
if np.isscalar(axis):
axis = [axis]
return gen_array_ops.squeeze(input, axis, name)
@tf_export("squeeze", v1=[])
@dispatch.add_dispatch_support
def squeeze_v2(input, axis=None, name=None):
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a
deprecated `squeeze_dims` argument.
Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
time, where `N` is the number of elements in the squeezed dimensions.
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: The input cannot be converted to a tensor, or the specified
axis cannot be squeezed.
"""
# pylint: disable=redefined-builtin
return squeeze(input, axis, name)
@tf_export(v1=["where"])
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are tensors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
    y: A `Tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
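# A sketch of the two modes described above (illustrative only; the helper is
# hypothetical and never called): coordinate mode when `x` and `y` are omitted,
# element selection when both are provided.
def _where_example():  # hypothetical demo helper
  coords = where([[True, False], [False, True]])  # [[0, 0], [1, 1]]
  selected = where([True, False, True], x=[1, 2, 3], y=[10, 20, 30])
  # selected is [1, 20, 3].
  return coords, selected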
@tf_export("where", v1=["where_v2"])
def where_v2(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `condition`, `x` and `y` must be broadcastable to the same
shape.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which is of the same type as `y`, and may be broadcastable with
`condition` and `y`.
y: A Tensor which is of the same type as `x`, and may be broadcastable with
`condition` and `x`.
name: A name of the operation (optional).
Returns:
A `Tensor` with the same type as `x` and `y`, and shape that
is broadcast from `condition`, `x`, and `y`, if `x`, `y` are non-None.
Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(None,
"seq_dim is deprecated, use seq_axis instead",
"seq_dim")
@deprecation.deprecated_args(None,
"batch_dim is deprecated, use batch_axis instead",
"batch_dim")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
"""Reverses variable length slices.
This op first slices `input` along the dimension `batch_axis`, and for
each slice `i`, reverses the first `seq_lengths[i]` elements along the
dimension `seq_axis`.
The elements of `seq_lengths` must obey `seq_lengths[i] <=
input.dims[seq_dim]`, and `seq_lengths` must be a vector of length
`input.dims[batch_dim]`.
The output slice `i` along dimension `batch_axis` is then given by
input slice `i`, with the first `seq_lengths[i]` slices along
dimension `seq_axis` reversed.
Example usage:
>>> seq_lengths = [7, 2, 3, 5]
>>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],
... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]
>>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)
>>> output
<tf.Tensor: shape=(4, 8), dtype=int32, numpy=
array([[0, 0, 5, 4, 3, 2, 1, 0],
[2, 1, 0, 0, 0, 0, 0, 0],
[3, 2, 1, 4, 0, 0, 0, 0],
[5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>
Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. 1-D with length `input.dims(batch_axis)` and
      `max(seq_lengths) <= input.dims(seq_axis)`.
    seq_axis: An `int`. The dimension which is partially reversed.
    batch_axis: An optional `int`. Defaults to `0`. The dimension along which
      reversal is performed.
    name: A name for the operation (optional).
Returns:
A Tensor. Has the same type as input.
"""
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
@tf_export("reverse_sequence", v1=[])
def reverse_sequence_v2(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None):
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence_v2.__doc__ = reverse_sequence.__doc__
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0): # pylint: disable=g-doc-args
r"""Gather slices from params axis `axis` according to indices.
Gather slices from params axis `axis` according to `indices`. `indices` must
be an integer tensor of any dimension (usually 0-D or 1-D).
For 0-D (scalar) `indices`:
$$\begin{align*}
output[p_0, ..., p_{axis-1}, && &&& p_{axis + 1}, ..., p_{N-1}] = \\
params[p_0, ..., p_{axis-1}, && indices, &&& p_{axis + 1}, ..., p_{N-1}]
\end{align*}$$
Where *N* = `ndims(params)`.
For 1-D (vector) `indices` with `batch_dims=0`:
$$\begin{align*}
output[p_0, ..., p_{axis-1}, && &i, &&p_{axis + 1}, ..., p_{N-1}] =\\
params[p_0, ..., p_{axis-1}, && indices[&i], &&p_{axis + 1}, ..., p_{N-1}]
\end{align*}$$
In the general case, produces an output tensor where:
$$\begin{align*}
output[p_0, &..., p_{axis-1}, &
&i_{B}, ..., i_{M-1}, &
p_{axis + 1}, &..., p_{N-1}] = \\
params[p_0, &..., p_{axis-1}, &
indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}], &
p_{axis + 1}, &..., p_{N-1}]
\end{align*}$$
Where *N* = `ndims(params)`, *M* = `ndims(indices)`, and *B* = `batch_dims`.
Note that `params.shape[:batch_dims]` must be identical to
`indices.shape[:batch_dims]`.
The shape of the output tensor is:
> `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
> params.shape[axis + 1:]`.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the corresponding
output value.
See also `tf.gather_nd`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
alt>
</div>
Args:
params: The `Tensor` from which to gather values. Must be at least rank
`axis + 1`.
indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
validate_indices: Deprecated, does nothing.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
batch_dims: An `integer`. The number of batch dimensions. Must be less
than `rank(indices)`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
"""
del validate_indices
if axis is None:
axis = batch_dims
if tensor_util.constant_value(axis) != 0:
return gen_array_ops.gather_v2(
params, indices, axis, batch_dims=batch_dims, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
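# A usage sketch for `gather` (illustrative only; the helper is hypothetical
# and never called at import time).
def _gather_example():  # hypothetical demo helper
  params = [[0, 1, 2], [10, 11, 12], [20, 21, 22]]
  rows = gather(params, [2, 0])          # [[20, 21, 22], [0, 1, 2]]
  cols = gather(params, [2, 0], axis=1)  # [[2, 0], [12, 10], [22, 20]]
  return rows, cols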
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
return gather(
params,
indices,
validate_indices=validate_indices,
name=name,
axis=axis,
batch_dims=batch_dims)
gather_v2.__doc__ = gather.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
"2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
"with `batch_dims=-1` instead.") # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
"""Gather slices from params according to indices with leading batch dims."""
with ops.name_scope(name, "BatchGather", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if indices.shape.ndims is None:
raise ValueError(
"batch_gather does not allow indices with unknown shape.")
return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
r"""Gather slices from params according to indices with leading batch dims.
This operation assumes that the leading `batch_dims` dimensions of `indices`
and `params` are batch dimensions; and performs a `tf.gather` operation within
each batch. (If `batch_dims` is not specified, then it defaults to
`rank(indices)-1`.) In the case in which `batch_dims==0`, this operation
is equivalent to `tf.gather`.
Args:
params: A Tensor. The tensor from which to gather values.
indices: A Tensor. Must be one of the following types: int32, int64. Index
tensor. Must be in range `[0, params.shape[batch_dims]]`.
batch_dims: An integer or none. The number of batch dimensions. Must be
less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
Returns:
A Tensor. Has the same type as `params`.
Raises:
ValueError: if `indices` has an unknown shape.
"""
if batch_dims is not None and not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
indices_ndims = indices.shape.ndims
if indices_ndims is None:
raise ValueError("tf.gather does not allow indices with unknown "
"rank when batch_dims is specified.")
if batch_dims is None:
batch_dims = indices_ndims - 1
if batch_dims < 0:
batch_dims += indices_ndims
if batch_dims < 0 or batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params.shape.ndims))
# Handle axis by transposing the axis dimension to be the first non-batch
# dimension, recursively calling batch_gather with axis=0, and then
# transposing the result to put the pre-axis dimensions before the indices
# dimensions.
if axis is not None and axis != batch_dims:
# Adjust axis to be positive.
    if not isinstance(axis, int):
      axis = where(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
else:
if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
raise ValueError("axis (%d) out of range [%d, %d)" %
(axis, -params.shape.ndims, params.shape.ndims))
if axis < 0:
axis += params.shape.ndims
if axis < batch_dims:
raise ValueError("batch_dims = %d must be less than or equal to "
"axis = %d" % (batch_dims, axis))
# Move params[axis] up to params[batch_dims].
perm = [
list(range(batch_dims)), [axis],
gen_math_ops._range(batch_dims, axis, 1),
gen_math_ops._range(axis + 1, rank(params), 1)
]
params = transpose(params, concat(perm, axis=0))
result = _batch_gather(params, indices, batch_dims=batch_dims)
# Move the result dimensions corresponding to params[batch_dims:axis]
# to just before the dimensions corresponding to indices[batch_dims:].
params_start = indices_ndims + axis - batch_dims
perm = [
list(range(batch_dims)),
gen_math_ops._range(indices_ndims, params_start, 1),
list(range(batch_dims, indices_ndims)),
gen_math_ops._range(params_start, rank(result), 1)
]
return transpose(result, perm=concat(perm, axis=0))
indices_shape = shape(indices)
params_shape = shape(params)
batch_indices = indices
indices_dtype = indices.dtype.base_dtype
accum_dim_value = ones((), dtype=indices_dtype)
# Use correct type for offset index computation
casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
for dim in range(batch_dims, 0, -1):
dim_value = casted_params_shape[dim - 1]
accum_dim_value *= casted_params_shape[dim]
start = zeros((), dtype=indices_dtype)
step = ones((), dtype=indices_dtype)
dim_indices = gen_math_ops._range(start, dim_value, step)
dim_indices *= accum_dim_value
dim_shape = stack(
[1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
batch_indices += reshape(dim_indices, dim_shape)
flat_indices = reshape(batch_indices, [-1])
outer_shape = params_shape[batch_dims + 1:]
flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
False)
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
axis=0))
flat_result = gather(flat_params, flat_indices)
result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
final_shape = indices.get_shape()[:batch_dims].merge_with(
params.get_shape()[:batch_dims])
final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
result.set_shape(final_shape)
return result
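# A NumPy sketch of the offset trick used above (illustrative only; the helper
# is hypothetical and never called): batched gathering reduces to one flat
# gather once each index is shifted by batch_index * per_batch_size.
def _batch_gather_offset_example():  # hypothetical demo helper
  params = np.array([[10, 11, 12], [20, 21, 22]])  # 2 batches of size 3
  indices = np.array([[2, 0], [1, 1]])             # per-batch indices
  offsets = np.arange(2).reshape(2, 1) * 3         # [[0], [3]]
  flat = params.reshape(-1)[(indices + offsets).reshape(-1)]
  return flat.reshape(2, 2)                        # [[12, 10], [21, 21]]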
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
r"""Gather slices from `params` into a Tensor with shape specified by `indices`.
`indices` is an K-dimensional integer tensor, best thought of as a
(K-1)-dimensional tensor of indices into `params`, where each element defines
a slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of
`params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements
(if `indices.shape[-1] == params.rank`) or slices
(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
Additionally both 'params' and 'indices' can have M leading batch
dimensions that exactly match. In this case 'batch_dims' must be M.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the
corresponding output value.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
The examples below are for the case when only indices have leading extra
dimensions. If both 'params' and 'indices' have leading batch dimensions, use
the 'batch_dims' parameter to run gather_nd in batch mode.
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
Examples with batched 'params' and 'indices':
```python
batch_dims = 1
indices = [[1], [0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
batch_dims = 1
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0']], [['a1', 'b1']]]
batch_dims = 1
indices = [[[1, 0]], [[0, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0'], ['b1']]
```
See also `tf.gather`.
Args:
params: A `Tensor`. The tensor from which to gather values.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor.
name: A name for the operation (optional).
batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.
Returns:
A `Tensor`. Has the same type as `params`.
"""
batch_dims_ = tensor_util.constant_value(batch_dims)
if batch_dims_ is not None:
batch_dims = int(batch_dims_)
if batch_dims == 0:
try:
# TODO(apassos) find a less bad way of detecting resource variables
# without introducing a circular dependency.
return params.gather_nd(indices, name=name)
except AttributeError:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
return gather_nd(params, indices, name=name, batch_dims=batch_dims)
gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
"""gather_nd implementation with batch support."""
with ops.name_scope(name, "BatchGatherND", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
if batch_dims < 0:
raise ValueError("tf.gather_nd does not allow negative batch_dims.")
params_ndims = params.shape.ndims
indices_ndims = indices.shape.ndims
if indices_ndims is not None and batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params_ndims is not None and batch_dims >= params_ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params_ndims))
expand = batch_dims == 0
if expand:
# Normally gather_nd will be called when batch_dims == 0.
# But if this function is called with batch_dims = 0, e.g. for testing
# purposes, this adds a dummy batch dimension to make batch_dims = 1.
params = expand_dims(params, axis=0)
indices = expand_dims(indices, axis=0)
batch_dims = 1
params_shape = shape(params)
indices_shape = shape(indices)
batch_shape = params_shape[:batch_dims]
batch_size = gen_math_ops.prod(batch_shape, [0])
index_internal_ndims = rank(indices) - batch_dims - 1
indices_internal_shape = indices_shape[batch_dims:-1]
# Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
# with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
# 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
# to the entire 'params' tensor.
# Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
# grid of size B1 x B2.
batch_dim_list = unstack(batch_shape, axis=0)
dim_ranges = [
gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
for x in batch_dim_list
]
mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
# Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
index_grid = transpose(stack(flat_list, axis=0))
# We need to concatenate these batch coordinates with the internal indices.
# concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
# So we reshape them both to [(B1.B2), i1, ..., iK, *]
index_grid_shape = shape(index_grid)
index_grid = reshape(
index_grid,
concat([
index_grid_shape[:1],
ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
],
axis=0))
tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
index_grid = tile(index_grid, multiples=tile_shape)
# index_grid now has shape [(B1.B2), i1, ..., iK, 2]
flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
flat_indices = reshape(indices, shape=flat_shape)
# flat_indices now has shape [(B1.B2), i1, ..., iK, C]
indices = concat((index_grid, flat_indices), axis=-1)
# indices has shape [(B1.B2), i1, ..., iK, 2+C]
out = gen_array_ops.gather_nd(params, indices)
# out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
# its original form.
out_shape = shape(out)
out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
if expand:
out = squeeze(out, axis=0)
return out
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
# (And also now because of 'axis' processing).
@tf_export(v1=["quantize_v2"])
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
"instead.") # pylint: disable=missing-docstring
def quantize_v2(
input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO",
narrow_range=False,
axis=None,
ensure_minimum_range=0.01):
if axis is None:
axis = -1
elif axis < 0:
if input.shape.ndims is None:
raise ValueError("input should have known rank to use negative axis.")
axis %= input.shape.ndims
if compat.forward_compatible(2019, 11, 13) or ensure_minimum_range != 0.01:
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode,
narrow_range=narrow_range,
axis=axis,
ensure_minimum_range=ensure_minimum_range)
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode,
narrow_range=narrow_range,
axis=axis)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose `tf.quantization.quantize` as the public API; the
# `tf.quantize_v2` endpoint above is kept only for backwards compatibility and
# is already deprecated.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@deprecation.deprecated_endpoints("quantize")
def quantize(
input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None,
narrow_range=False,
axis=None,
ensure_minimum_range=0.01):
"""Quantize the input tensor."""
if compat.forward_compatible(2019, 11, 13) or ensure_minimum_range != 0.01:
return quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name,
narrow_range=narrow_range,
axis=axis,
ensure_minimum_range=ensure_minimum_range)
return quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name,
narrow_range=narrow_range,
axis=axis)
@tf_export("quantization.dequantize", v1=["quantization.dequantize",
"dequantize"])
@deprecation.deprecated_endpoints("dequantize")
def dequantize( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
min_range,
max_range,
mode="MIN_COMBINED",
name=None,
axis=None,
narrow_range=False):
if axis is None:
axis = -1
elif axis < 0:
if input.shape.ndims is None:
raise ValueError("input should have known rank to use negative axis.")
axis %= input.shape.ndims
if compat.forward_compatible(2019, 10, 22) or axis >= 0 or narrow_range:
return gen_array_ops.dequantize(
input, min_range, max_range, mode=mode, name=name,
narrow_range=narrow_range, axis=axis)
return gen_array_ops.dequantize(
input, min_range, max_range, mode=mode, name=name)
dequantize.__doc__ = gen_array_ops.dequantize.__doc__
@tf_export("quantization.quantize_and_dequantize")
def quantize_and_dequantize(
input, # pylint: disable=redefined-builtin
input_min,
input_max,
signed_input=True,
num_bits=8,
range_given=False,
round_mode="HALF_TO_EVEN",
name=None,
narrow_range=False,
axis=None):
"""Quantizes then dequantizes a tensor.
Args:
input: A `Tensor` to quantize and dequantize.
input_min: If range_given=True, the minimum input value, that needs to be
represented in the quantized representation. If axis is specified, this
should be a vector of minimum values for each slice along axis.
input_max: If range_given=True, the maximum input value that needs to be
represented in the quantized representation. If axis is specified, this
should be a vector of maximum values for each slice along axis.
    signed_input: Whether the quantization is signed (True) or unsigned (False).
num_bits: The bitwidth of the quantization.
range_given: If true use `input_min` and `input_max` for the range of the
input, otherwise determine min and max from the input `Tensor`.
    round_mode: Rounding mode when rounding from float values to quantized
      ones; one of ['HALF_TO_EVEN', 'HALF_UP'].
name: Optional name for the operation.
narrow_range: If true, then the absolute value of the quantized minimum
value is the same as the quantized maximum value, instead of 1 greater.
i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
axis: Integer. If specified, refers to a dimension of the input tensor, such
that quantization will be per slice along that dimension.
Returns:
A `Tensor`. Each element is the result of quantizing and dequantizing the
corresponding element of `input`.
"""
if axis is None:
axis = -1
elif axis < 0:
if input.shape.ndims is None:
raise ValueError("input should have known rank to use negative axis.")
axis %= input.shape.ndims
return gen_array_ops.quantize_and_dequantize_v2(
input,
input_min=input_min,
input_max=input_max,
signed_input=signed_input,
num_bits=num_bits,
range_given=range_given,
round_mode=round_mode,
narrow_range=narrow_range,
axis=axis,
name=name)
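# A usage sketch (illustrative only; the helper is hypothetical and never
# called): round-trips float values through an 8-bit quantized representation.
def _quantize_and_dequantize_example():  # hypothetical demo helper
  x = [-1.0, -0.5, 0.0, 0.3, 1.0]
  out = quantize_and_dequantize(
      x, input_min=-1.0, input_max=1.0, num_bits=8, range_given=True)
  # out has the same shape as x; each element is snapped to the nearest
  # representable 8-bit level within [-1, 1].
  return out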
@tf_export("searchsorted")
def searchsorted(sorted_sequence,
values,
side="left",
out_type=dtypes.int32,
name=None):
"""Searches input tensor for values on the innermost dimension.
A 2-D example:
```
sorted_sequence = [[0, 3, 9, 9, 10],
[1, 2, 3, 4, 5]]
values = [[2, 4, 9],
[0, 2, 6]]
result = searchsorted(sorted_sequence, values, side="left")
result == [[1, 2, 2],
[0, 1, 5]]
result = searchsorted(sorted_sequence, values, side="right")
result == [[1, 2, 4],
[0, 2, 5]]
```
Args:
sorted_sequence: N-D `Tensor` containing a sorted sequence.
values: N-D `Tensor` containing the search values.
side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
upper_bound.
out_type: The output type (`int32` or `int64`). Default is `tf.int32`.
name: Optional name for the operation.
Returns:
An N-D `Tensor` the size of values containing the result of applying either
lower_bound or upper_bound (depending on side) to each value. The result
is not a global index to the entire `Tensor`, but the index in the last
dimension.
Raises:
ValueError: If the last dimension of `sorted_sequence >= 2^31-1` elements.
If the total size of values exceeds `2^31 - 1` elements.
If the first `N-1` dimensions of the two tensors don't match.
"""
sequence_size = shape_internal(sorted_sequence)[-1]
values_size = shape_internal(values)[-1]
sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
values_2d = reshape(values, [-1, values_size])
if side == "right":
output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
name)
elif side == "left":
output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
name)
else:
raise ValueError("side must be either 'right' or 'left'. Saw: %s." % side)
return reshape(output, shape_internal(values))
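# A usage sketch mirroring the 2-D example in the docstring above (illustrative
# only; the helper is hypothetical and never called at import time).
def _searchsorted_example():  # hypothetical demo helper
  sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]]
  values = [[2, 4, 9], [0, 2, 6]]
  left = searchsorted(sorted_sequence, values, side="left")
  # left == [[1, 2, 2], [0, 1, 5]]
  right = searchsorted(sorted_sequence, values, side="right")
  # right == [[1, 2, 4], [0, 2, 5]]
  return left, right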
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_patches")
def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
r"""Extract `patches` from `images`.
This op collects patches from the input image, as if applying a
convolution. All extracted patches are stacked in the depth (last) dimension
of the output.
Specifically, the op extracts patches of shape `sizes` which are `strides`
apart in the input image. The output is subsampled using the `rates` argument,
in the same manner as "atrous" or "dilated" convolutions.
The result is a 4D tensor which is indexed by batch, row, and column.
`output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`
which is taken from the input starting at
`images[i, x*strides[1], y*strides[2]]`.
Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where
`depth` is `images.shape[3]`.
The output elements are taken from the input at intervals given by the `rate`
argument, as in dilated convolutions.
The `padding` argument has no effect on the size of each patch, it determines
how many patches are extracted. If `VALID`, only patches which are fully
contained in the input image are included. If `SAME`, all patches whose
starting point is inside the input are included, and areas outside the input
default to zero.
Example:
```
n = 10
# images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100
images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]
# We generate two outputs as follows:
# 1. 3x3 patches with stride length 5
# 2. Same as above, but the rate is increased to 2
  tf.image.extract_patches(images=images,
                           sizes=[1, 3, 3, 1],
                           strides=[1, 5, 5, 1],
                           rates=[1, 1, 1, 1],
                           padding='VALID')
# Yields:
[[[[ 1 2 3 11 12 13 21 22 23]
[ 6 7 8 16 17 18 26 27 28]]
[[51 52 53 61 62 63 71 72 73]
[56 57 58 66 67 68 76 77 78]]]]
```
If we mark the pixels in the input image which are taken for the output with
`*`, we see the pattern:
```
* * * 4 5 * * * 9 10
* * * 14 15 * * * 19 20
* * * 24 25 * * * 29 30
31 32 33 34 35 36 37 38 39 40
41 42 43 44 45 46 47 48 49 50
* * * 54 55 * * * 59 60
* * * 64 65 * * * 69 70
* * * 74 75 * * * 79 80
81 82 83 84 85 86 87 88 89 90
91 92 93 94 95 96 97 98 99 100
```
```
  tf.image.extract_patches(images=images,
                           sizes=[1, 3, 3, 1],
                           strides=[1, 5, 5, 1],
                           rates=[1, 2, 2, 1],
                           padding='VALID')
# Yields:
[[[[ 1 3 5 21 23 25 41 43 45]
[ 6 8 10 26 28 30 46 48 50]]
[[ 51 53 55 71 73 75 91 93 95]
[ 56 58 60 76 78 80 96 98 100]]]]
```
We can again draw the effect, this time using the symbols `*`, `x`, `+` and
`o` to distinguish the patches:
```
* 2 * 4 * x 7 x 9 x
11 12 13 14 15 16 17 18 19 20
* 22 * 24 * x 27 x 29 x
31 32 33 34 35 36 37 38 39 40
* 42 * 44 * x 47 x 49 x
+ 52 + 54 + o 57 o 59 o
61 62 63 64 65 66 67 68 69 70
+ 72 + 74 + o 77 o 79 o
81 82 83 84 85 86 87 88 89 90
+ 92 + 94 + o 97 o 99 o
```
Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
sizes: The size of the extracted patches. Must be [1, size_rows, size_cols,
1].
strides: A 1-D Tensor of length 4. How far the centers of two consecutive
patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
This is the input stride, specifying how far two consecutive patch samples
are in the input. Equivalent to extracting patches with `patch_sizes_eff =
patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
them spatially by a factor of `rates`. This is equivalent to `rate` in
dilated (a.k.a. Atrous) convolutions.
padding: The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A 4-D Tensor of the same type as the input.
"""
return gen_array_ops.extract_image_patches(images, sizes, strides, rates,
padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
"ksizes")
def extract_image_patches( # pylint: disable=missing-docstring
images,
ksizes=None,
strides=None,
rates=None,
padding=None,
name=None,
sizes=None):
ksizes = deprecation.deprecated_argument_lookup("sizes", sizes, "ksizes",
ksizes)
return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,
padding, name)
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
@tf_export("fingerprint")
def fingerprint(data, method="farmhash64", name=None):
r"""Generates fingerprint values.
Generates fingerprint values of `data`.
Fingerprint op considers the first dimension of `data` as the batch dimension,
and `output[i]` contains the fingerprint value generated from contents in
`data[i, ...]` for all `i`.
Fingerprint op writes fingerprint values as byte arrays. For example, the
default method `farmhash64` generates a 64-bit fingerprint value at a time.
  This 8-byte value is written out as a `tf.uint8` array of size 8, in
little-endian order.
For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),
and that the fingerprint method is `farmhash64`. In this case, the output
shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the
size of each fingerprint value in bytes. `output[0, :]` is generated from
12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from
other 12 integers in `data[1, :, :]`.
Note that this op fingerprints the raw underlying buffer, and it does not
fingerprint Tensor's metadata such as data type and/or shape. For example, the
fingerprint values are invariant under reshapes and bitcasts as long as the
  batch dimension remains the same:
```python
tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))
tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))
```
  For string data, one should expect `tf.fingerprint(data) !=
  tf.fingerprint(tf.strings.reduce_join(data))` in general.
Args:
data: A `Tensor`. Must have rank 1 or higher.
method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.
Currently available method is `farmhash64`.
name: A name for the operation (optional).
Returns:
    A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
`data`'s first dimension, and the second dimension size depends on the
fingerprint algorithm.
"""
return gen_array_ops.fingerprint(data, method, name)
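# Illustrative sketch, not part of the original module: with the default
# `farmhash64` method each batch element hashes to 8 bytes, so the output shape
# is always `[batch, 8]` (assuming eager execution):
#   data = tf.ones([2, 3, 4], dtype=tf.int32)
#   tf.fingerprint(data).shape   # TensorShape([2, 8]); the values are tf.uint8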
def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
"""Converts the given value to an integer Tensor."""
tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
if tensor.dtype.is_integer:
tensor = gen_math_ops.cast(tensor, dtype)
else:
raise TypeError("%s must be an integer tensor; dtype=%s" %
(name, tensor.dtype))
return tensor
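# Illustrative sketch, not part of the original module: the helper accepts any
# integer-typed input and casts it to the requested dtype, but rejects
# floating-point input outright:
#   convert_to_int_tensor([1, 2, 3], "repeats")   # int32 tensor [1, 2, 3]
#   convert_to_int_tensor(1.5, "repeats")         # raises TypeError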
def get_positive_axis(axis, ndims):
"""Validate an `axis` parameter, and normalize it to be positive.
If `ndims` is known (i.e., not `None`), then check that `axis` is in the
range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or
`axis + ndims` (otherwise).
If `ndims` is not known, and `axis` is positive, then return it as-is.
If `ndims` is not known, and `axis` is negative, then report an error.
Args:
axis: An integer constant
ndims: An integer constant, or `None`
Returns:
The normalized `axis` value.
Raises:
ValueError: If `axis` is out-of-bounds, or if `axis` is negative and
`ndims is None`.
"""
if not isinstance(axis, int):
raise TypeError("axis must be an int; got %s" % type(axis).__name__)
if ndims is not None:
if 0 <= axis < ndims:
return axis
elif -ndims <= axis < 0:
return axis + ndims
else:
raise ValueError("axis=%s out of bounds: expected %s<=axis<%s" %
(axis, -ndims, ndims))
elif axis < 0:
raise ValueError("axis may only be negative if ndims is statically known.")
return axis
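# Illustrative sketch, not part of the original module: a few representative
# inputs and the behaviour the function above gives them:
#   get_positive_axis(-1, 3)     # -> 2
#   get_positive_axis(1, None)   # -> 1 (ndims unknown, positive axis passes through)
#   get_positive_axis(-1, None)  # -> ValueError
#   get_positive_axis(3, 3)      # -> ValueError (out of bounds)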
# This op is intended to exactly match the semantics of numpy.repeat, with
# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior
# when axis is not specified. Rather than implement that special behavior, we
# simply make `axis` be a required argument.
#
# External (OSS) `tf.repeat` feature request:
# https://github.com/tensorflow/tensorflow/issues/8246
def repeat_with_axis(data, repeats, axis, name=None):
"""Repeats elements of `data`.
Args:
data: An `N`-dimensional tensor.
repeats: A 1-D integer tensor specifying how many times each element in
`axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.
Supports broadcasting from a scalar value.
axis: `int`. The axis along which to repeat values. Must be less than
`max(N, 1)`.
name: A name for the operation.
Returns:
A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,
except that dimension `axis` has size `sum(repeats)`.
Example usage:
>>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
<tf.Tensor: shape=(5,), dtype=string,
numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
<tf.Tensor: shape=(5, 2), dtype=int32, numpy=
array([[1, 2],
[1, 2],
[3, 4],
[3, 4],
[3, 4]], dtype=int32)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
<tf.Tensor: shape=(2, 5), dtype=int32, numpy=
array([[1, 1, 2, 2, 2],
[3, 3, 4, 4, 4]], dtype=int32)>
"""
if not isinstance(axis, int):
raise TypeError("axis must be an int; got %s" % type(axis).__name__)
with ops.name_scope(name, "Repeat", [data, repeats]):
data = ops.convert_to_tensor(data, name="data")
repeats = convert_to_int_tensor(repeats, name="repeats")
repeats.shape.with_rank_at_most(1)
# If `data` is a scalar, then upgrade it to a vector.
data = _with_nonzero_rank(data)
data_shape = shape(data)
# If `axis` is negative, then convert it to a positive value.
axis = get_positive_axis(axis, data.shape.ndims)
# Check data Tensor shapes.
if repeats.shape.ndims == 1:
data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])
# If we know that `repeats` is a scalar, then we can just tile & reshape.
if repeats.shape.ndims == 0:
expanded = expand_dims(data, axis + 1)
tiled = tile_one_dimension(expanded, axis + 1, repeats)
result_shape = concat([data_shape[:axis], [-1], data_shape[axis + 1:]],
axis=0)
return reshape(tiled, result_shape)
# Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
if repeats.shape.ndims != axis + 1:
repeats_shape = shape(repeats)
repeats_ndims = rank(repeats)
broadcast_shape = concat(
[data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
repeats = broadcast_to(repeats, broadcast_shape)
repeats.set_shape([None] * (axis + 1))
# Create a "sequence mask" based on `repeats`, where slices across `axis`
# contain one `True` value for each repetition. E.g., if
# `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
max_repeat = gen_math_ops.maximum(
0, gen_math_ops._max(repeats, _all_dimensions(repeats)))
mask = sequence_mask(repeats, max_repeat)
# Add a new dimension around each value that needs to be repeated, and
# then tile that new dimension to match the maximum number of repetitions.
expanded = expand_dims(data, axis + 1)
tiled = tile_one_dimension(expanded, axis + 1, max_repeat)
# Use `boolean_mask` to discard the extra repeated values. This also
# flattens all dimensions up through `axis`.
masked = boolean_mask(tiled, mask)
# Reshape the output tensor to add the outer dimensions back.
if axis == 0:
result = masked
else:
result_shape = concat([data_shape[:axis], [-1], data_shape[axis + 1:]],
axis=0)
result = reshape(masked, result_shape)
# Preserve shape information.
if data.shape.ndims is not None:
new_axis_size = 0 if repeats.shape[0] == 0 else None
result.set_shape(data.shape[:axis].concatenate(
[new_axis_size]).concatenate(data.shape[axis + 1:]))
return result
def tile_one_dimension(data, axis, multiple):
"""Tiles a single dimension of a tensor."""
# Assumes axis is a nonnegative int.
if data.shape.ndims is not None:
multiples = [1] * data.shape.ndims
multiples[axis] = multiple
else:
ones_value = ones(rank(data), dtypes.int32)
multiples = concat([ones_value[:axis], [multiple], ones_value[axis + 1:]],
axis=0)
return tile(data, multiples)
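# Illustrative sketch, not part of the original module: tiling only axis 1 of a
# [2, 3] tensor by a multiple of 2 yields a [2, 6] tensor:
#   tile_one_dimension(tf.zeros([2, 3]), axis=1, multiple=2).shape   # [2, 6]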
def _with_nonzero_rank(data):
"""If `data` is scalar, then add a dimension; otherwise return as-is."""
if data.shape.ndims is not None:
if data.shape.ndims == 0:
return stack([data])
else:
return data
else:
data_shape = shape(data)
data_ndims = rank(data)
return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])
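# Illustrative sketch, not part of the original module:
#   _with_nonzero_rank(tf.constant(7)).shape       # [1]  (scalar promoted to a vector)
#   _with_nonzero_rank(tf.constant([7, 8])).shape  # [2]  (already non-scalar, unchanged)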
@tf_export("repeat")
def repeat(input, repeats, axis=None, name=None): # pylint: disable=redefined-builtin
"""Repeat elements of `input`.
Args:
input: An `N`-dimensional Tensor.
    repeats: A 1-D `int` Tensor. The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis. `len(repeats)`
must equal `input.shape[axis]` if axis is not None.
axis: An int. The axis along which to repeat values. By default (axis=None),
use the flattened input array, and return a flat output array.
name: A name for the operation.
Returns:
A Tensor which has the same shape as `input`, except along the given axis.
If axis is None then the output array is flattened to match the flattened
input array.
Example usage:
>>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
<tf.Tensor: shape=(5,), dtype=string,
numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
<tf.Tensor: shape=(5, 2), dtype=int32, numpy=
array([[1, 2],
[1, 2],
[3, 4],
[3, 4],
[3, 4]], dtype=int32)>
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
<tf.Tensor: shape=(2, 5), dtype=int32, numpy=
array([[1, 1, 2, 2, 2],
[3, 3, 4, 4, 4]], dtype=int32)>
>>> repeat(3, repeats=4)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([3, 3, 3, 3], dtype=int32)>
>>> repeat([[1,2], [3,4]], repeats=2)
<tf.Tensor: shape=(8,), dtype=int32,
numpy=array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)>
"""
if axis is None:
input = reshape(input, [-1])
axis = 0
return repeat_with_axis(input, repeats, axis, name)
|
apache-2.0
| 5,979,389,566,144,273,000
| 34.926457
| 129
| 0.608781
| false
| 3.381069
| false
| false
| false
|
FKlama/hycud
|
REMO.py
|
1
|
2344
|
# HYCUD
# Copyright (C) 2014 Klama, Nina Alexandra and Rezaei-Ghaleh, Nasrollah
#
# This file is part of HYCUD.
#
# HYCUD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HYCUD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HYCUD. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import subprocess
import gc
from os import path
from HelperFunctions import waitTillFileExists, ANSI_ESC, flush
from multiprocessing import Pool
esc = ANSI_ESC()
class runREMO:
"""Class returning a function to run REMO for one model"""
def __init__(self, opt, count):
self.opt = opt
self.size = count
def __call__(self, model):
tmpDir = ""
if self.opt.keepTemp:
tmpDir = path.join(self.opt.tmpPath, ("REMO_%09i" % model.num))
os.makedirs(tmpDir)
else:
tmpDir = tempfile.mkdtemp(prefix="REMO_", suffix="", dir=self.opt.tmpPath)
(PDB_dir, PDB_file) = path.split(model.getPDB())
tmpPDB = path.join(tmpDir, PDB_file)
remoExe = path.join(self.opt.REMOPath, "REMO.pl")
subprocess.check_call(['ln', '-s', model.getPDB(), tmpPDB])
waitTillFileExists(tmpPDB)
os.chdir(tmpDir)
if self.opt.verbose > 1:
print("nice -n", str(self.opt.nice), "perl" , remoExe, "0", PDB_file)
elif self.opt.verbose > 0:
print("{0}2K{0}1GCalculating REMO {1:6n}/{2:n}".format(
esc, model.num, self.size), end='')
flush()
subprocess.check_output(['nice', '-n', str(self.opt.nice), 'perl', remoExe, "0", PDB_file])
waitTillFileExists(tmpPDB + ".h")
subprocess.check_call(['mv', (tmpPDB + '.h'), model.getSprouted()])
def sproutModels(opt, models):
"""Function sprouts full sidechains for a given set of protein models"""
sprout_pool = Pool(processes=opt.threads)
task = runREMO(opt, models.size())
sprout_pool.map(task, models.models)
if opt.verbose > 0:
print("")
gc.collect()
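# Illustrative sketch, not part of HYCUD: based on the attributes used above,
# `opt` is expected to provide at least threads, nice, verbose, keepTemp,
# tmpPath and REMOPath, while `models` exposes size() and a `models` iterable
# of model objects with num, getPDB() and getSprouted(). A caller would then
# simply run:
#   sproutModels(opt, models)   # sprouts side chains for every model in parallel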
|
gpl-3.0
| -9,100,186,542,631,282,000
| 32.971014
| 95
| 0.680461
| false
| 3.197817
| false
| false
| false
|
rwth-ti/gr-ofdm
|
python/ofdm/qa_freqshift.py
|
1
|
13032
|
#!/usr/bin/env python
#
# Copyright 2014 Institute for Theoretical Information Technology,
# RWTH Aachen University
# www.ti.rwth-aachen.de
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, eng_notation
import ofdm as ofdm
import os
import sys, numpy, random, math, cmath
from numpy import concatenate
class qa_ofdm (gr_unittest.TestCase):
def setUp (self):
self.fg = gr.top_block ("test_block")
def tearDown (self):
self.fg = None
# no shift
def test_001 (self):
vlen = 128
syms = 4
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[vlen/2-vlen/4] = 1.0
vec = concatenate([vec]*syms)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f([0.0]*syms)
trig = gr.vector_source_b([1]*syms)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual(vec, numpy.array(dst.data()))
# simple shift by -1.0, one frequency bin
def test_002 (self):
vlen = 128
syms = 4
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[vlen/2-vlen/4] = 1.0
vec = concatenate([vec]*syms)
epsilon = [-1]
frame_trigger = numpy.concatenate([[1],[0]*(syms-1)])
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[vlen/2-vlen/4+i*vlen+epsilon[0]] = 1.0
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger.tolist())
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-6)
# simple shift by -1.0, two frequency bins, asymmetric
def test_003 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/3
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = 1.0
vec[bin2] = -1.0
vec = concatenate([vec]*syms)
epsilon = [-1]*syms
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = 1.0
expec[bin2+i*vlen+epsilon[i]] = -1.0
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-6)
# simple shift by -1.0, two frequency bins, _symmetric_
def test_004 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/4
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = 1.0
vec[bin2] = -1.0
vec = concatenate([vec]*syms)
epsilon = [-1]*syms
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = 1.0
expec[bin2+i*vlen+epsilon[i]] = -1.0
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-6)
# simple shift by +10.0, two frequency bins, asymmetric
def test_005 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/3
bin1_val = 1.0
bin2_val = -1.0j
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = bin1_val
vec[bin2] = bin2_val
vec = concatenate([vec]*syms)
epsilon = [+10]*syms
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = bin1_val
expec[bin2+i*vlen+epsilon[i]] = bin2_val
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5)
# different shifts per symbol, two frequency bins, asymmetric
def test_006 (self):
vlen = 128
syms = 4
bin1 = vlen/2-vlen/4
bin2 = vlen/2+vlen/3
bin1_val = 1.0j
bin2_val = -1.0
vec = numpy.array(numpy.zeros(vlen), numpy.complex)
vec[bin1] = bin1_val
vec[bin2] = bin2_val
vec = concatenate([vec]*syms)
epsilon = [1,-4,5,2]
frame_trigger = [1]*syms
expec = numpy.array(numpy.zeros(vlen*syms), numpy.complex)
for i in range(syms):
expec[bin1+i*vlen+epsilon[i]] = bin1_val
expec[bin2+i*vlen+epsilon[i]] = bin2_val
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
ifft = gr.fft_vcc(vlen, False, [], True)
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.vector_source_c(vec)
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger)
self.fg.connect(src, s2v, ifft, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5)
# one signal at frequency 1.5 / vlen, shifted to 2.0+vlen/2 bin
# initial phase offset remains constant through all symbols in one frame
def test_007 (self):
vlen = 128
syms = 4
bin1 = vlen/2 + 2
bin1_val = 1.0
expec = numpy.array(numpy.zeros(vlen), numpy.complex)
expec[bin1] = bin1_val
expec = concatenate([expec]*syms)
epsilon = [0.5]
frame_trigger = numpy.concatenate([[1],[0]*(syms-1)])
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
src = gr.sig_source_c(vlen, gr.GR_COS_WAVE, 1.5, 1.0, 0.0)
# bin vlen/2 + 1.5
dst = gr.vector_sink_c()
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, vlen)
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger.tolist())
self.fg.connect(src, s2v, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5, 1e-5)
# one signal at frequency 4.5 / vlen, shifted to 4.0+vlen/2 bin
# tests phase correction for cyclic prefix
def test_008 (self):
vlen = 128
syms = 4
bin1 = vlen/2 + 4
bin1_val = 1.0
cp_length = vlen/4
expec = numpy.array(numpy.zeros(vlen), numpy.complex)
expec[bin1] = bin1_val
expec = concatenate([expec]*syms)
epsilon = [-0.5]
frame_trigger = numpy.concatenate([[1],[0]*(syms-1)])
freq_shift = ofdm.frequency_shift_vcc(vlen, 1.0/vlen, cp_length)
fft = gr.fft_vcc(vlen, True, [], True) # natural order, dc = vlen / 2
fft_scale = gr.multiply_const_vcc([1.0/vlen]*vlen)
sampler = ofdm.vector_sampler(gr.sizeof_gr_complex,vlen)
trigger_vec = concatenate([[0]*(vlen+cp_length-1),[1]])
trigger_vec = concatenate([trigger_vec]*syms)
trigger = gr.vector_source_b(trigger_vec.tolist())
src = gr.sig_source_c(vlen, gr.GR_COS_WAVE, 4.5, 1.0, 0.0) # bin vlen/2 + 4.5
dst = gr.vector_sink_c()
v2s = gr.vector_to_stream(gr.sizeof_gr_complex, vlen)
eps = gr.vector_source_f(epsilon)
trig = gr.vector_source_b(frame_trigger.tolist())
self.fg.connect(src, (sampler,0))
self.fg.connect(trigger, (sampler,1))
self.fg.connect(sampler, (freq_shift,0))
self.fg.connect(eps, (freq_shift,1))
self.fg.connect(trig, (freq_shift,2))
self.fg.connect(freq_shift, fft, fft_scale, v2s, dst)
self.fg.run()
self.assertComplexTuplesAlmostEqual2(expec, dst.data(), 1e-5, 1e-5)
def test_100(self):
vlen = 256
cp_len = 12
M = 10
N = int(3e6)
uut = ofdm.frequency_shift_vcc( vlen, 1.0/vlen, cp_len )
trig = [0]*M
trig[0] = 1
eps = [1.]*M
src1 = gr.vector_source_c( [1.]*(M*vlen), True, vlen )
src2 = gr.vector_source_f( eps, True )
src3 = gr.vector_source_b( trig, True )
dst = gr.null_sink( gr.sizeof_gr_complex * vlen )
limit3 = gr.head( gr.sizeof_char, N )
self.fg.connect( src1, ( uut, 0 ) )
self.fg.connect( src2, ( uut, 1 ) )
self.fg.connect( src3, limit3, ( uut, 2 ) )
self.fg.connect( uut, dst )
r = time_it( self.fg )
print "Rate %s" % \
( eng_notation.num_to_str( float( ( vlen + cp_len ) * N ) / r ) )
def time_it(tb):
start = os.times()
tb.run()
stop = os.times()
delta = map((lambda a, b: a-b), stop, start)
user, sys, childrens_user, childrens_sys, real = delta
total_user = user + childrens_user
total_sys = sys + childrens_sys
print "real %7.3f" % (real,)
print "user %7.3f" % (total_user,)
print "sys %7.3f" % (total_sys,)
return real
if __name__ == '__main__':
gr_unittest.main()
|
gpl-3.0
| 5,002,455,819,893,132,000
| 30.708029
| 81
| 0.607812
| false
| 2.79297
| true
| false
| false
|
keishi/chromium
|
chrome/test/pyautolib/remote_inspector_client.py
|
1
|
39894
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome remote inspector utility for pyauto tests.
This script provides a python interface that acts as a front-end for Chrome's
remote inspector module, communicating via sockets to interact with Chrome in
the same way that the Developer Tools does. This -- in theory -- should allow
a pyauto test to do anything that Chrome's Developer Tools does, as long as the
appropriate communication with the remote inspector is implemented in this
script.
This script assumes that Chrome is already running on the local machine with
flag '--remote-debugging-port=9222' to enable remote debugging on port 9222.
To use this module, first create an instance of class RemoteInspectorClient;
doing this sets up a connection to Chrome's remote inspector. Then call the
appropriate functions on that object to perform the desired actions with the
remote inspector. When done, call Stop() on the RemoteInspectorClient object
to stop communication with the remote inspector.
For example, to take v8 heap snapshots from a pyauto test:
import remote_inspector_client
my_client = remote_inspector_client.RemoteInspectorClient()
snapshot_info = my_client.HeapSnapshot(include_summary=True)
// Do some stuff...
new_snapshot_info = my_client.HeapSnapshot(include_summary=True)
my_client.Stop()
It is expected that a test will only use one instance of RemoteInspectorClient
at a time. If a second instance is instantiated, a RuntimeError will be raised.
RemoteInspectorClient could be made into a singleton in the future if the need
for it arises.
"""
import asyncore
import datetime
import logging
import optparse
import pprint
import simplejson
import socket
import sys
import threading
import time
import urllib2
import urlparse
class _DevToolsSocketRequest(object):
"""A representation of a single DevToolsSocket request.
A DevToolsSocket request is used for communication with a remote Chrome
instance when interacting with the renderer process of a given webpage.
Requests and results are passed as specially-formatted JSON messages,
according to a communication protocol defined in WebKit. The string
representation of this request will be a JSON message that is properly
formatted according to the communication protocol.
Public Attributes:
method: The string method name associated with this request.
id: A unique integer id associated with this request.
params: A dictionary of input parameters associated with this request.
results: A dictionary of relevant results obtained from the remote Chrome
instance that are associated with this request.
is_fulfilled: A boolean indicating whether or not this request has been sent
and all relevant results for it have been obtained (i.e., this value is
True only if all results for this request are known).
is_fulfilled_condition: A threading.Condition for waiting for the request to
be fulfilled.
"""
def __init__(self, method, params, message_id):
"""Initialize.
Args:
method: The string method name for this request.
message_id: An integer id for this request, which is assumed to be unique
from among all requests.
"""
self.method = method
self.id = message_id
self.params = params
self.results = {}
self.is_fulfilled = False
self.is_fulfilled_condition = threading.Condition()
def __repr__(self):
json_dict = {}
json_dict['method'] = self.method
json_dict['id'] = self.id
if self.params:
json_dict['params'] = self.params
return simplejson.dumps(json_dict, separators=(',', ':'))
class _DevToolsSocketClient(asyncore.dispatcher):
"""Client that communicates with a remote Chrome instance via sockets.
This class works in conjunction with the _RemoteInspectorThread class to
communicate with a remote Chrome instance following the remote debugging
communication protocol in WebKit. This class performs the lower-level work
of socket communication.
Public Attributes:
handshake_done: A boolean indicating whether or not the client has completed
the required protocol handshake with the remote Chrome instance.
inspector_thread: An instance of the _RemoteInspectorThread class that is
working together with this class to communicate with a remote Chrome
instance.
"""
def __init__(self, verbose, show_socket_messages, hostname, port, path):
"""Initialize.
Args:
verbose: A boolean indicating whether or not to use verbose logging.
show_socket_messages: A boolean indicating whether or not to show the
socket messages sent/received when communicating with the remote
Chrome instance.
hostname: The string hostname of the DevToolsSocket to which to connect.
port: The integer port number of the DevToolsSocket to which to connect.
path: The string path of the DevToolsSocket to which to connect.
"""
asyncore.dispatcher.__init__(self)
self._logger = logging.getLogger('_DevToolsSocketClient')
self._logger.setLevel([logging.WARNING, logging.DEBUG][verbose])
self._show_socket_messages = show_socket_messages
self._read_buffer = ''
self._write_buffer = ''
self._socket_buffer_lock = threading.Lock()
self.handshake_done = False
self.inspector_thread = None
# Connect to the remote Chrome instance and initiate the protocol handshake.
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((hostname, port))
fields = [
'Upgrade: WebSocket',
'Connection: Upgrade',
'Host: %s:%d' % (hostname, port),
'Origin: http://%s:%d' % (hostname, port),
'Sec-WebSocket-Key1: 4k0L66E ZU 8 5 <18 <TK 7 7',
'Sec-WebSocket-Key2: s2 20 `# 4| 3 9 U_ 1299',
]
handshake_msg = ('GET %s HTTP/1.1\r\n%s\r\n\r\n\x47\x30\x22\x2D\x5A\x3F'
'\x47\x58' % (path, '\r\n'.join(fields)))
self._Write(handshake_msg.encode('utf-8'))
def SendMessage(self, msg):
"""Causes a request message to be sent to the remote Chrome instance.
Args:
msg: A string message to be sent; assumed to be a JSON message in proper
format according to the remote debugging protocol in WebKit.
"""
# According to the communication protocol, each request message sent over
# the wire must begin with '\x00' and end with '\xff'.
self._Write('\x00' + msg.encode('utf-8') + '\xff')
def _Write(self, msg):
"""Causes a raw message to be sent to the remote Chrome instance.
Args:
msg: A raw string message to be sent.
"""
self._write_buffer += msg
self.handle_write()
def handle_write(self):
"""Called if a writable socket can be written; overridden from asyncore."""
self._socket_buffer_lock.acquire()
if self._write_buffer:
sent = self.send(self._write_buffer)
if self._show_socket_messages:
msg_type = ['Handshake', 'Message'][self._write_buffer[0] == '\x00' and
self._write_buffer[-1] == '\xff']
msg = ('========================\n'
'Sent %s:\n'
'========================\n'
'%s\n'
'========================') % (msg_type,
self._write_buffer[:sent-1])
print msg
self._write_buffer = self._write_buffer[sent:]
self._socket_buffer_lock.release()
def handle_read(self):
"""Called when a socket can be read; overridden from asyncore."""
self._socket_buffer_lock.acquire()
if self.handshake_done:
# Process a message reply from the remote Chrome instance.
self._read_buffer += self.recv(4096)
pos = self._read_buffer.find('\xff')
while pos >= 0:
pos += len('\xff')
data = self._read_buffer[:pos-len('\xff')]
pos2 = data.find('\x00')
if pos2 >= 0:
data = data[pos2 + 1:]
self._read_buffer = self._read_buffer[pos:]
if self._show_socket_messages:
msg = ('========================\n'
'Received Message:\n'
'========================\n'
'%s\n'
'========================') % data
print msg
if self.inspector_thread:
self.inspector_thread.NotifyReply(data)
pos = self._read_buffer.find('\xff')
else:
# Process a handshake reply from the remote Chrome instance.
self._read_buffer += self.recv(4096)
pos = self._read_buffer.find('\r\n\r\n')
if pos >= 0:
pos += len('\r\n\r\n')
data = self._read_buffer[:pos]
self._read_buffer = self._read_buffer[pos:]
self.handshake_done = True
if self._show_socket_messages:
msg = ('=========================\n'
'Received Handshake Reply:\n'
'=========================\n'
'%s\n'
'=========================') % data
print msg
self._socket_buffer_lock.release()
def handle_close(self):
"""Called when the socket is closed; overridden from asyncore."""
self.close()
def writable(self):
"""Determines if writes can occur for this socket; overridden from asyncore.
Returns:
True, if there is something to write to the socket, or
False, otherwise.
"""
return len(self._write_buffer) > 0
def handle_expt(self):
"""Called when out-of-band data exists; overridden from asyncore."""
self.handle_error()
def handle_error(self):
"""Called when an exception is raised; overridden from asyncore."""
self.close()
self.inspector_thread.ClientSocketExceptionOccurred()
asyncore.dispatcher.handle_error(self)
class _RemoteInspectorThread(threading.Thread):
"""Manages communication using Chrome's remote inspector protocol.
This class works in conjunction with the _DevToolsSocketClient class to
communicate with a remote Chrome instance following the remote inspector
communication protocol in WebKit. This class performs the higher-level work
of managing request and reply messages, whereas _DevToolsSocketClient handles
the lower-level work of socket communication.
"""
def __init__(self, tab_index, verbose, show_socket_messages):
"""Initialize.
Args:
tab_index: The integer index of the tab in the remote Chrome instance to
use for snapshotting.
verbose: A boolean indicating whether or not to use verbose logging.
show_socket_messages: A boolean indicating whether or not to show the
socket messages sent/received when communicating with the remote
Chrome instance.
"""
threading.Thread.__init__(self)
self._logger = logging.getLogger('_RemoteInspectorThread')
self._logger.setLevel([logging.WARNING, logging.DEBUG][verbose])
self._killed = False
self._requests = []
self._action_queue = []
self._action_queue_condition = threading.Condition()
self._action_specific_callback = None # Callback only for current action.
self._action_specific_callback_lock = threading.Lock()
self._general_callbacks = [] # General callbacks that can be long-lived.
self._general_callbacks_lock = threading.Lock()
self._condition_to_wait = None
# Create a DevToolsSocket client and wait for it to complete the remote
# debugging protocol handshake with the remote Chrome instance.
result = self._IdentifyDevToolsSocketConnectionInfo(tab_index)
self._client = _DevToolsSocketClient(
verbose, show_socket_messages, result['host'], result['port'],
result['path'])
self._client.inspector_thread = self
while asyncore.socket_map:
if self._client.handshake_done or self._killed:
break
asyncore.loop(timeout=1, count=1, use_poll=True)
def ClientSocketExceptionOccurred(self):
"""Notifies that the _DevToolsSocketClient encountered an exception."""
self.Kill()
def NotifyReply(self, msg):
"""Notifies of a reply message received from the remote Chrome instance.
Args:
msg: A string reply message received from the remote Chrome instance;
assumed to be a JSON message formatted according to the remote
debugging communication protocol in WebKit.
"""
reply_dict = simplejson.loads(msg)
# Notify callbacks of this message received from the remote inspector.
self._action_specific_callback_lock.acquire()
if self._action_specific_callback:
self._action_specific_callback(reply_dict)
self._action_specific_callback_lock.release()
self._general_callbacks_lock.acquire()
if self._general_callbacks:
for callback in self._general_callbacks:
callback(reply_dict)
self._general_callbacks_lock.release()
if 'result' in reply_dict:
# This is the result message associated with a previously-sent request.
request = self.GetRequestWithId(reply_dict['id'])
if request:
request.is_fulfilled_condition.acquire()
request.is_fulfilled_condition.notify()
request.is_fulfilled_condition.release()
def run(self):
"""Start this thread; overridden from threading.Thread."""
while not self._killed:
self._action_queue_condition.acquire()
if self._action_queue:
# There's a request to the remote inspector that needs to be processed.
messages, callback = self._action_queue.pop(0)
self._action_specific_callback_lock.acquire()
self._action_specific_callback = callback
self._action_specific_callback_lock.release()
# Prepare the request list.
for message_id, message in enumerate(messages):
self._requests.append(
_DevToolsSocketRequest(message[0], message[1], message_id))
# Send out each request. Wait until each request is complete before
# sending the next request.
for request in self._requests:
self._FillInParams(request)
self._client.SendMessage(str(request))
request.is_fulfilled_condition.acquire()
          self._condition_to_wait = request.is_fulfilled_condition
request.is_fulfilled_condition.wait()
request.is_fulfilled_condition.release()
if self._killed:
self._client.close()
return
# Clean up so things are ready for the next request.
self._requests = []
self._action_specific_callback_lock.acquire()
self._action_specific_callback = None
self._action_specific_callback_lock.release()
# Wait until there is something to process.
self._condition_to_wait = self._action_queue_condition
self._action_queue_condition.wait()
self._action_queue_condition.release()
self._client.close()
def Kill(self):
"""Notify this thread that it should stop executing."""
self._killed = True
# The thread might be waiting on a condition.
if self._condition_to_wait:
self._condition_to_wait.acquire()
self._condition_to_wait.notify()
self._condition_to_wait.release()
def PerformAction(self, request_messages, reply_message_callback):
"""Notify this thread of an action to perform using the remote inspector.
Args:
request_messages: A list of strings representing the requests to make
using the remote inspector.
reply_message_callback: A callable to be invoked any time a message is
received from the remote inspector while the current action is
being performed. The callable should accept a single argument,
which is a dictionary representing a message received.
"""
self._action_queue_condition.acquire()
self._action_queue.append((request_messages, reply_message_callback))
self._action_queue_condition.notify()
self._action_queue_condition.release()
def AddMessageCallback(self, callback):
"""Add a callback to invoke for messages received from the remote inspector.
Args:
callback: A callable to be invoked any time a message is received from the
remote inspector. The callable should accept a single argument, which
is a dictionary representing a message received.
"""
self._general_callbacks_lock.acquire()
self._general_callbacks.append(callback)
self._general_callbacks_lock.release()
def RemoveMessageCallback(self, callback):
"""Remove a callback from the set of those to invoke for messages received.
Args:
callback: A callable to remove from consideration.
"""
self._general_callbacks_lock.acquire()
self._general_callbacks.remove(callback)
self._general_callbacks_lock.release()
def GetRequestWithId(self, request_id):
"""Identifies the request with the specified id.
Args:
request_id: An integer request id; should be unique for each request.
Returns:
A request object associated with the given id if found, or
None otherwise.
"""
found_request = [x for x in self._requests if x.id == request_id]
if found_request:
return found_request[0]
return None
def GetFirstUnfulfilledRequest(self, method):
"""Identifies the first unfulfilled request with the given method name.
An unfulfilled request is one for which all relevant reply messages have
not yet been received from the remote inspector.
Args:
method: The string method name of the request for which to search.
Returns:
The first request object in the request list that is not yet fulfilled
and is also associated with the given method name, or
None if no such request object can be found.
"""
for request in self._requests:
if not request.is_fulfilled and request.method == method:
return request
return None
def _GetLatestRequestOfType(self, ref_req, method):
"""Identifies the latest specified request before a reference request.
This function finds the latest request with the specified method that
occurs before the given reference request.
Args:
ref_req: A reference request from which to start looking.
method: The string method name of the request for which to search.
Returns:
The latest _DevToolsSocketRequest object with the specified method,
if found, or None otherwise.
"""
start_looking = False
for request in self._requests[::-1]:
if request.id == ref_req.id:
start_looking = True
elif start_looking:
if request.method == method:
return request
return None
def _FillInParams(self, request):
"""Fills in parameters for requests as necessary before the request is sent.
Args:
request: The _DevToolsSocketRequest object associated with a request
message that is about to be sent.
"""
if request.method == 'Profiler.takeHeapSnapshot':
# We always want detailed v8 heap snapshot information.
request.params = {'detailed': True}
elif request.method == 'Profiler.getProfile':
# To actually request the snapshot data from a previously-taken snapshot,
# we need to specify the unique uid of the snapshot we want.
# The relevant uid should be contained in the last
# 'Profiler.takeHeapSnapshot' request object.
last_req = self._GetLatestRequestOfType(request,
'Profiler.takeHeapSnapshot')
if last_req and 'uid' in last_req.results:
request.params = {'type': 'HEAP', 'uid': last_req.results['uid']}
@staticmethod
def _IdentifyDevToolsSocketConnectionInfo(tab_index):
"""Identifies DevToolsSocket connection info from a remote Chrome instance.
Args:
tab_index: The integer index of the tab in the remote Chrome instance to
which to connect.
Returns:
A dictionary containing the DevToolsSocket connection info:
{
'host': string,
'port': integer,
'path': string,
}
Raises:
RuntimeError: When DevToolsSocket connection info cannot be identified.
"""
try:
# TODO(dennisjeffrey): Do not assume port 9222. The port should be passed
# as input to this function.
f = urllib2.urlopen('http://localhost:9222/json')
      result = f.read()
result = simplejson.loads(result)
except urllib2.URLError, e:
raise RuntimeError(
'Error accessing Chrome instance debugging port: ' + str(e))
if tab_index >= len(result):
raise RuntimeError(
'Specified tab index %d doesn\'t exist (%d tabs found)' %
(tab_index, len(result)))
if 'webSocketDebuggerUrl' not in result[tab_index]:
raise RuntimeError('No socket URL exists for the specified tab.')
socket_url = result[tab_index]['webSocketDebuggerUrl']
parsed = urlparse.urlparse(socket_url)
# On ChromeOS, the "ws://" scheme may not be recognized, leading to an
# incorrect netloc (and empty hostname and port attributes) in |parsed|.
# Change the scheme to "http://" to fix this.
if not parsed.hostname or not parsed.port:
socket_url = 'http' + socket_url[socket_url.find(':'):]
parsed = urlparse.urlparse(socket_url)
# Warning: |parsed.scheme| is incorrect after this point.
return ({'host': parsed.hostname,
'port': parsed.port,
'path': parsed.path})
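  # Illustrative sketch, not part of the original module: against a local
  # Chrome started with --remote-debugging-port=9222, the returned dictionary
  # typically looks like the following (the path value here is hypothetical):
  #   {'host': 'localhost', 'port': 9222, 'path': '/devtools/page/<page-id>'}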
class _RemoteInspectorDriverThread(threading.Thread):
"""Drives the communication service with the remote inspector."""
def __init__(self):
"""Initialize."""
threading.Thread.__init__(self)
def run(self):
"""Drives the communication service with the remote inspector."""
try:
while asyncore.socket_map:
asyncore.loop(timeout=1, count=1, use_poll=True)
except KeyboardInterrupt:
pass
class _V8HeapSnapshotParser(object):
"""Parses v8 heap snapshot data."""
_CHILD_TYPES = ['context', 'element', 'property', 'internal', 'hidden',
'shortcut', 'weak']
_NODE_TYPES = ['hidden', 'array', 'string', 'object', 'code', 'closure',
'regexp', 'number', 'native', 'synthetic']
@staticmethod
def ParseSnapshotData(raw_data):
"""Parses raw v8 heap snapshot data and returns the summarized results.
The raw heap snapshot data is represented as a JSON object with the
following keys: 'snapshot', 'nodes', and 'strings'.
The 'snapshot' value provides the 'title' and 'uid' attributes for the
snapshot. For example:
{ u'title': u'org.webkit.profiles.user-initiated.1', u'uid': 1}
The 'nodes' value is a list of node information from the v8 heap, with a
special first element that describes the node serialization layout (see
HeapSnapshotJSONSerializer::SerializeNodes). All other list elements
contain information about nodes in the v8 heap, according to the
serialization layout.
The 'strings' value is a list of strings, indexed by values in the 'nodes'
list to associate nodes with strings.
Args:
raw_data: A string representing the raw v8 heap snapshot data.
Returns:
A dictionary containing the summarized v8 heap snapshot data:
{
'total_v8_node_count': integer, # Total number of nodes in the v8 heap.
'total_shallow_size': integer, # Total heap size, in bytes.
}
"""
total_node_count = 0
total_shallow_size = 0
constructors = {}
# TODO(dennisjeffrey): The following line might be slow, especially on
# ChromeOS. Investigate faster alternatives.
heap = simplejson.loads(raw_data)
index = 1 # Bypass the special first node list item.
node_list = heap['nodes']
while index < len(node_list):
node_type = node_list[index]
node_name = node_list[index + 1]
node_id = node_list[index + 2]
node_self_size = node_list[index + 3]
node_retained_size = node_list[index + 4]
node_dominator = node_list[index + 5]
node_children_count = node_list[index + 6]
index += 7
node_children = []
for i in xrange(node_children_count):
child_type = node_list[index]
child_type_string = _V8HeapSnapshotParser._CHILD_TYPES[int(child_type)]
child_name_index = node_list[index + 1]
child_to_node = node_list[index + 2]
index += 3
child_info = {
'type': child_type_string,
'name_or_index': child_name_index,
'to_node': child_to_node,
}
node_children.append(child_info)
# Get the constructor string for this node so nodes can be grouped by
# constructor.
# See HeapSnapshot.js: WebInspector.HeapSnapshotNode.prototype.
type_string = _V8HeapSnapshotParser._NODE_TYPES[int(node_type)]
constructor_name = None
if type_string == 'hidden':
constructor_name = '(system)'
elif type_string == 'object':
constructor_name = heap['strings'][int(node_name)]
elif type_string == 'native':
pos = heap['strings'][int(node_name)].find('/')
if pos >= 0:
constructor_name = heap['strings'][int(node_name)][:pos].rstrip()
else:
constructor_name = heap['strings'][int(node_name)]
elif type_string == 'code':
constructor_name = '(compiled code)'
else:
constructor_name = '(' + type_string + ')'
node_obj = {
'type': type_string,
'name': heap['strings'][int(node_name)],
'id': node_id,
'self_size': node_self_size,
'retained_size': node_retained_size,
'dominator': node_dominator,
'children_count': node_children_count,
'children': node_children,
}
if constructor_name not in constructors:
constructors[constructor_name] = []
constructors[constructor_name].append(node_obj)
total_node_count += 1
total_shallow_size += node_self_size
# TODO(dennisjeffrey): Have this function also return more detailed v8
# heap snapshot data when a need for it arises (e.g., using |constructors|).
result = {}
result['total_v8_node_count'] = total_node_count
result['total_shallow_size'] = total_shallow_size
return result
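  # Illustrative sketch with hypothetical data, not part of the original
  # module: a minimal snapshot containing one 'object' node of 16 bytes and no
  # children. The leading 0 stands in for the serialization-layout element that
  # the parser skips.
  #   raw = simplejson.dumps({
  #       'snapshot': {'title': 'example', 'uid': 1},
  #       'nodes': [0, 3, 0, 1, 16, 16, 0, 0],
  #       'strings': ['MyConstructor'],
  #   })
  #   _V8HeapSnapshotParser.ParseSnapshotData(raw)
  #   # -> {'total_v8_node_count': 1, 'total_shallow_size': 16}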
# TODO(dennisjeffrey): The "verbose" option used in this file should re-use
# pyauto's verbose flag.
class RemoteInspectorClient(object):
"""Main class for interacting with Chrome's remote inspector.
Upon initialization, a socket connection to Chrome's remote inspector will
be established. Users of this class should call Stop() to close the
connection when it's no longer needed.
Public Methods:
Stop: Close the connection to the remote inspector. Should be called when
a user is done using this module.
HeapSnapshot: Takes a v8 heap snapshot and returns the summarized data.
GetMemoryObjectCounts: Retrieves memory object count information.
CollectGarbage: Forces a garbage collection.
StartTimelineEventMonitoring: Starts monitoring for timeline events.
StopTimelineEventMonitoring: Stops monitoring for timeline events.
"""
# TODO(dennisjeffrey): Allow a user to specify a window index too (not just a
# tab index), when running through PyAuto.
def __init__(self, tab_index=0, verbose=False, show_socket_messages=False):
"""Initialize.
Args:
tab_index: The integer index of the tab in the remote Chrome instance to
which to connect. Defaults to 0 (the first tab).
verbose: A boolean indicating whether or not to use verbose logging.
show_socket_messages: A boolean indicating whether or not to show the
socket messages sent/received when communicating
with the remote Chrome instance.
"""
self._tab_index = tab_index
self._verbose = verbose
self._show_socket_messages = show_socket_messages
self._timeline_started = False
logging.basicConfig()
self._logger = logging.getLogger('RemoteInspectorClient')
self._logger.setLevel([logging.WARNING, logging.DEBUG][verbose])
# Creating _RemoteInspectorThread might raise an exception. This prevents an
# AttributeError in the destructor.
self._remote_inspector_thread = None
self._remote_inspector_driver_thread = None
# Start up a thread for long-term communication with the remote inspector.
self._remote_inspector_thread = _RemoteInspectorThread(
tab_index, verbose, show_socket_messages)
self._remote_inspector_thread.start()
# At this point, a connection has already been made to the remote inspector.
# This thread calls asyncore.loop, which activates the channel service.
self._remote_inspector_driver_thread = _RemoteInspectorDriverThread()
self._remote_inspector_driver_thread.start()
def __del__(self):
"""Called on destruction of this object."""
self.Stop()
def Stop(self):
"""Stop/close communication with the remote inspector."""
if self._remote_inspector_thread:
self._remote_inspector_thread.Kill()
self._remote_inspector_thread.join()
self._remote_inspector_thread = None
if self._remote_inspector_driver_thread:
self._remote_inspector_driver_thread.join()
self._remote_inspector_driver_thread = None
def HeapSnapshot(self, include_summary=False):
"""Takes a v8 heap snapshot.
Returns:
A dictionary containing information for a single v8 heap
snapshot that was taken.
{
'url': string, # URL of the webpage that was snapshotted.
'raw_data': string, # The raw data as JSON string.
'total_v8_node_count': integer, # Total number of nodes in the v8 heap.
# Only if |include_summary| is True.
'total_heap_size': integer, # Total v8 heap size (number of bytes).
# Only if |include_summary| is True.
}
"""
HEAP_SNAPSHOT_MESSAGES = [
('Page.getResourceTree', {}),
('Debugger.enable', {}),
('Profiler.clearProfiles', {}),
('Profiler.takeHeapSnapshot', {}),
('Profiler.getProfile', {}),
]
self._current_heap_snapshot = []
self._url = ''
self._collected_heap_snapshot_data = {}
done_condition = threading.Condition()
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote inspector.
"""
if 'result' in reply_dict:
# This is the result message associated with a previously-sent request.
request = self._remote_inspector_thread.GetRequestWithId(
reply_dict['id'])
if 'frameTree' in reply_dict['result']:
self._url = reply_dict['result']['frameTree']['frame']['url']
elif 'method' in reply_dict:
# This is an auxiliary message sent from the remote Chrome instance.
if reply_dict['method'] == 'Profiler.addProfileHeader':
snapshot_req = (
self._remote_inspector_thread.GetFirstUnfulfilledRequest(
'Profiler.takeHeapSnapshot'))
if snapshot_req:
snapshot_req.results['uid'] = reply_dict['params']['header']['uid']
elif reply_dict['method'] == 'Profiler.addHeapSnapshotChunk':
self._current_heap_snapshot.append(reply_dict['params']['chunk'])
elif reply_dict['method'] == 'Profiler.finishHeapSnapshot':
# A heap snapshot has been completed. Analyze and output the data.
self._logger.debug('Heap snapshot taken: %s', self._url)
# TODO(dennisjeffrey): Parse the heap snapshot on-the-fly as the data
# is coming in over the wire, so we can avoid storing the entire
# snapshot string in memory.
raw_snapshot_data = ''.join(self._current_heap_snapshot)
self._collected_heap_snapshot_data = {
'url': self._url,
'raw_data': raw_snapshot_data}
if include_summary:
self._logger.debug('Now analyzing heap snapshot...')
parser = _V8HeapSnapshotParser()
time_start = time.time()
self._logger.debug('Raw snapshot data size: %.2f MB',
len(raw_snapshot_data) / (1024.0 * 1024.0))
result = parser.ParseSnapshotData(raw_snapshot_data)
self._logger.debug('Time to parse data: %.2f sec',
time.time() - time_start)
count = result['total_v8_node_count']
self._collected_heap_snapshot_data['total_v8_node_count'] = count
total_size = result['total_shallow_size']
self._collected_heap_snapshot_data['total_heap_size'] = total_size
done_condition.acquire()
done_condition.notify()
done_condition.release()
# Tell the remote inspector to take a v8 heap snapshot, then wait until
# the snapshot information is available to return.
self._remote_inspector_thread.PerformAction(HEAP_SNAPSHOT_MESSAGES,
HandleReply)
done_condition.acquire()
done_condition.wait()
done_condition.release()
return self._collected_heap_snapshot_data
def EvaluateJavaScript(self, expression):
"""Evaluates a JavaScript expression and returns the result.
Sends a message containing the expression to the remote Chrome instance we
are connected to, and evaluates it in the context of the tab we are
connected to. Blocks until the result is available and returns it.
Returns:
A dictionary representing the result.
"""
EVALUATE_MESSAGES = [
('Runtime.evaluate', { 'expression': expression,
'objectGroup': 'group',
'returnByValue': True }),
('Runtime.releaseObjectGroup', { 'objectGroup': 'group' })
]
self._result = None
done_condition = threading.Condition()
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote Chrome instance.
"""
if 'result' in reply_dict and 'result' in reply_dict['result']:
self._result = reply_dict['result']['result']['value']
done_condition.acquire()
done_condition.notify()
done_condition.release()
# Tell the remote inspector to evaluate the given expression, then wait
# until that information is available to return.
self._remote_inspector_thread.PerformAction(EVALUATE_MESSAGES,
HandleReply)
done_condition.acquire()
done_condition.wait()
done_condition.release()
return self._result
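  # Illustrative sketch, not part of the original module: because the request
  # sets 'returnByValue': True, simple expressions come back as plain Python
  # values (assuming a RemoteInspectorClient instance named `client`):
  #   client.EvaluateJavaScript('1 + 1')           # -> 2
  #   client.EvaluateJavaScript('document.title')  # -> the page title string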
def GetMemoryObjectCounts(self):
"""Retrieves memory object count information.
Returns:
A dictionary containing the memory object count information:
{
'DOMNodeCount': integer, # Total number of DOM nodes.
'EventListenerCount': integer, # Total number of event listeners.
}
"""
MEMORY_COUNT_MESSAGES = [
('Memory.getDOMNodeCount', {})
]
self._event_listener_count = None
self._dom_node_count = None
done_condition = threading.Condition()
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote Chrome instance.
"""
if 'result' in reply_dict and 'domGroups' in reply_dict['result']:
event_listener_count = 0
dom_node_count = 0
dom_group_list = reply_dict['result']['domGroups']
for dom_group in dom_group_list:
listener_array = dom_group['listenerCount']
for listener in listener_array:
event_listener_count += listener['count']
dom_node_array = dom_group['nodeCount']
for dom_element in dom_node_array:
dom_node_count += dom_element['count']
self._event_listener_count = event_listener_count
self._dom_node_count = dom_node_count
done_condition.acquire()
done_condition.notify()
done_condition.release()
# Tell the remote inspector to collect memory count info, then wait until
# that information is available to return.
self._remote_inspector_thread.PerformAction(MEMORY_COUNT_MESSAGES,
HandleReply)
done_condition.acquire()
done_condition.wait()
done_condition.release()
return {
'DOMNodeCount': self._dom_node_count,
'EventListenerCount': self._event_listener_count,
}
def CollectGarbage(self):
"""Forces a garbage collection."""
COLLECT_GARBAGE_MESSAGES = [
('Profiler.collectGarbage', {})
]
# Tell the remote inspector to do a garbage collect. We can return
# immediately, since there is no result for which to wait.
self._remote_inspector_thread.PerformAction(COLLECT_GARBAGE_MESSAGES, None)
def StartTimelineEventMonitoring(self, event_callback):
"""Starts timeline event monitoring.
Args:
event_callback: A callable to invoke whenever a timeline event is observed
from the remote inspector. The callable should take a single input,
which is a dictionary containing the detailed information of a
timeline event.
"""
if self._timeline_started:
self._logger.warning('Timeline monitoring already started.')
return
TIMELINE_MESSAGES = [
('Timeline.start', {})
]
self._event_callback = event_callback
def HandleReply(reply_dict):
"""Processes a reply message received from the remote Chrome instance.
Args:
reply_dict: A dictionary object representing the reply message received
from the remote Chrome instance.
"""
if reply_dict.get('method') == 'Timeline.eventRecorded':
self._event_callback(reply_dict['params']['record'])
# Tell the remote inspector to start the timeline. We can return
# immediately, since there is no result for which to wait.
self._timeline_callback = HandleReply
self._remote_inspector_thread.AddMessageCallback(self._timeline_callback)
self._remote_inspector_thread.PerformAction(TIMELINE_MESSAGES, None)
self._timeline_started = True
def StopTimelineEventMonitoring(self):
"""Stops timeline event monitoring."""
if not self._timeline_started:
self._logger.warning('Timeline monitoring already stopped.')
return
TIMELINE_MESSAGES = [
('Timeline.stop', {})
]
# Tell the remote inspector to stop the timeline. We can return
# immediately, since there is no result for which to wait.
self._remote_inspector_thread.RemoveMessageCallback(self._timeline_callback)
self._remote_inspector_thread.PerformAction(TIMELINE_MESSAGES, None)
self._timeline_started = False
def _ConvertByteCountToHumanReadableString(self, num_bytes):
"""Converts an integer number of bytes into a human-readable string.
Args:
num_bytes: An integer number of bytes.
Returns:
A human-readable string representation of the given number of bytes.
"""
if num_bytes < 1024:
return '%d B' % num_bytes
elif num_bytes < 1048576:
return '%.2f KB' % (num_bytes / 1024.0)
else:
return '%.2f MB' % (num_bytes / 1048576.0)
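# ---------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the original module): it assumes
# 'inspector' is an already-connected instance of the client class above; the
# constructor and connection setup sit outside this excerpt, so that wiring is
# hypothetical. Only methods defined above are called.
def _example_inspector_session(inspector):
  # Evaluate an expression in the connected tab and print the returned value.
  title = inspector.EvaluateJavaScript('document.title')
  print('Tab title: %s' % title)
  # Compare DOM node / event listener counts before and after a forced GC.
  before = inspector.GetMemoryObjectCounts()
  inspector.CollectGarbage()
  after = inspector.GetMemoryObjectCounts()
  print('DOM nodes: %s -> %s' % (before['DOMNodeCount'], after['DOMNodeCount']))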
|
bsd-3-clause
| 7,801,864,265,520,846,000
| 37.694471
| 80
| 0.657643
| false
| 4.253092
| false
| false
| false
|
jgehring/Laudio
|
laudio/src/song/formats/mp3.py
|
1
|
2091
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Laudio - A webbased musicplayer
Copyright (C) 2010 Bernhard Posselt, bernhard.posselt@gmx.at
Laudio is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Laudio is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Laudio. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from time import time
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3NoHeaderError
from laudio.src.song.song import Song
class MP3Song (Song):
def __init__(self, path):
""" Read metainformation from an ogg file
The multiple KeyErrors check if tags are not Null
Keyword arguments:
path -- the full path to the song
"""
self.codec = "mp3"
self.path = path
self.song = MP3(self.path)
try:
self.id3 = EasyID3(self.path)
for key in ('title', 'artist', 'album', 'genre', 'date', 'tracknumber'):
attr = self.id3.get(key, ('',))[0]
setattr(self, key, attr.encode("utf-8") )
self.bitrate = int(self.song.info.bitrate) / 1000
self.length = int(self.song.info.length)
# check if tracknumber is numeric
if not self.tracknumber.isdigit():
self.tracknumber = 0
# except no id3 tags
except (ID3NoHeaderError, AttributeError):
for key in ('title', 'artist', 'album', 'genre', 'date'):
setattr(self, key, "")
self.tracknumber = 0
self.bitrate = 0
self.length = 0
self.title = os.path.basename(self.path)
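# Editor's hedged usage sketch (not part of the original module): the path is a
# placeholder; MP3Song falls back to empty tags when the file has no ID3 header.
if __name__ == '__main__':
    example = MP3Song('/tmp/example.mp3')
    print('%s - %s (%s kbps, %s s)' % (example.artist, example.title,
                                       example.bitrate, example.length))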
|
gpl-3.0
| 3,334,994,844,228,916,000
| 34.440678
| 84
| 0.634146
| false
| 3.84375
| false
| false
| false
|
Wopple/fimbulvetr
|
src/client/mapmode_c.py
|
1
|
3743
|
import os
import sys
import pygame
from pygame.locals import *
from common import mvc
from common.constants import *
from client.constants import *
class Controller(mvc.Controller):
def __init__(self, model=None, screen=None):
super(Controller, self).__init__()
self.shift = [False, False]
def update(self):
if self.model.initialCount == 0:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.model.leftClick()
elif event.button == 3:
self.model.rightClick(True in self.shift)
elif event.button == 4:
self.model.scrollIn()
elif event.button == 5:
self.model.scrollOut()
elif event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
sys.exit(0)
elif event.key == K_SPACE:
self.model.pausePressed()
elif event.key == K_1:
self.model.numberKey(1)
elif event.key == K_2:
self.model.numberKey(2)
elif event.key == K_3:
self.model.numberKey(3)
elif event.key == K_4:
self.model.numberKey(4)
elif event.key == K_5:
self.model.numberKey(5)
elif event.key == K_6:
self.model.numberKey(6)
elif event.key == K_7:
self.model.numberKey(7)
elif event.key == K_8:
self.model.numberKey(8)
elif event.key == K_9:
self.model.numberKey(9)
elif event.key == K_UP:
self.model.key(0, True)
elif event.key == K_DOWN:
self.model.key(1, True)
elif event.key == K_LEFT:
self.model.key(2, True)
elif event.key == K_RIGHT:
self.model.key(3, True)
elif event.key == K_PAGEUP:
self.model.key(4, True)
elif event.key == K_PAGEDOWN:
self.model.key(5, True)
elif event.key == K_F1:
self.model.testKey(1)
elif event.key == K_F2:
self.model.testKey(2)
elif event.key == K_LSHIFT:
self.shift[0] = True
elif event.key == K_RSHIFT:
self.shift[1] = True
elif event.type == pygame.KEYUP:
if event.key == K_UP:
self.model.key(0, False)
elif event.key == K_DOWN:
self.model.key(1, False)
elif event.key == K_LEFT:
self.model.key(2, False)
elif event.key == K_RIGHT:
self.model.key(3, False)
elif event.key == K_PAGEUP:
self.model.key(4, False)
elif event.key == K_PAGEDOWN:
self.model.key(5, False)
elif event.key == K_LSHIFT:
self.shift[0] = False
elif event.key == K_RSHIFT:
self.shift[1] = False
elif event.type == pygame.QUIT:
sys.exit(0)
|
bsd-3-clause
| 4,532,720,471,375,591,000
| 40.131868
| 65
| 0.40716
| false
| 4.509639
| false
| false
| false
|
asenovm/book-search-server
|
crawler/chitanka_crawler_get_books_list.py
|
1
|
3790
|
import requests
import time
import os
import sqlite3
def init_make_request():
global conn
global last_hour
global last_minute
global queries_for_last_minute
global queries_for_last_hour
last_hour = time.clock()
last_minute = time.clock()
queries_for_last_minute = 0
queries_for_last_hour = 0
conn = sqlite3.connect('chitanka.db')
def make_request(req):
global last_minute
global last_hour
global queries_for_last_minute
global queries_for_last_hour
time.sleep(2)
while queries_for_last_hour > 175:
delta = time.clock() - last_hour
if delta < 3600:
print "queries limit for hour reached, %d minutes remaining" % int(60-delta/60)
time.sleep(60)
else:
last_hour = time.clock()
queries_for_last_hour = 0
while queries_for_last_minute > 18:
		delta = time.clock() - last_minute
if delta < 60:
print "queries limit for minute reached, %d seconds remaining" % int(60-delta)
time.sleep(10)
else:
last_minute = time.clock()
queries_for_last_minute = 0
queries_for_last_hour += 1
queries_for_last_minute += 1
proxy = {'http': 'http://93.123.45.23:8008'}
#r = requests.get(req, proxies = proxy)
r = requests.get(req)
return r
def find_books_in_text(text):
global conn
#print text
c = conn.cursor()
ind = 0
ind = text.find('<span>epub</span></a></li>', ind)
while ind != -1:
ind = ind + 26
ind = text.find('"', ind)
ind = ind + 1
book_name = text[ind:text.find('"', ind)]
#print book_name
c.execute('select * from books where name="%s"' % book_name)
if len(c.fetchall()) == 0:
c.execute('insert into books values ("%s", 0)' % book_name)
conn.commit()
print 'new book found: %s' % book_name
ind = text.find('<span>epub</span></a></li>', ind)
c.close()
def main():
global conn
c = conn.cursor()
c.execute('select * from categories')
cats = c.fetchall()
flag = True
for category in cats:
print 'getting books in %s' % str(category[0])
if str(category[0]) == 'savremenni-romani-i-povesti':
flag = False
if flag:
continue
tries = 5
while tries:
try:
				tries -= 1
r = make_request('http://www.chitanka.info/books/category/'+category[0])
break
except:
print "exception"
time.sleep(30)
find_books_in_text(r.text)
pagination = r.text.find('<ul class="pagination">')
if pagination != -1:
ind = r.text.find('<li class="next">')
while r.text[ind] != '"':
ind = ind - 1
ind = ind + 2
second_ind = ind + 1
while r.text[second_ind] != '<':
second_ind = second_ind + 1
pages_count = int(r.text[ind:second_ind])
for i in range(1, pages_count):
print 'category page %d' % (i+1)
tries = 5
while tries:
try:
						tries -= 1
r = make_request('http://www.chitanka.info/books/category/'+category[0]+'.html/'+str(i+1))
break
except:
print "except"
time.sleep(30)
find_books_in_text(r.text)
c.close()
if __name__ == '__main__':
init_make_request()
main()
|
mit
| -2,583,254,922,830,167,000
| 27.719697
| 114
| 0.492348
| false
| 3.907216
| false
| false
| false
|
aashish24/dataset
|
dataset/persistence/table.py
|
1
|
13492
|
import logging
from itertools import count
from sqlalchemy.sql import and_, expression
from sqlalchemy.schema import Column, Index
from dataset.persistence.util import guess_type
from dataset.persistence.util import ResultIter
from dataset.util import DatasetException
log = logging.getLogger(__name__)
class Table(object):
def __init__(self, database, table):
self.indexes = dict([(i.name, i) for i in table.indexes])
self.database = database
self.table = table
self._is_dropped = False
@property
def columns(self):
"""
Get a listing of all columns that exist in the table.
>>> print 'age' in table.columns
True
"""
return set(self.table.columns.keys())
def drop(self):
"""
Drop the table from the database, deleting both the schema
and all the contents within it.
Note: the object will raise an Exception if you use it after
dropping the table. If you want to re-create the table, make
sure to get a fresh instance from the :py:class:`Database <dataset.Database>`.
"""
self.database._acquire()
self._is_dropped = True
self.database._tables.pop(self.table.name, None)
self.table.drop(self.database.engine)
def _check_dropped(self):
if self._is_dropped:
raise DatasetException('the table has been dropped. this object should not be used again.')
def insert(self, row, ensure=True, types={}):
"""
Add a row (type: dict) by inserting it into the table.
        If ``ensure`` is set and any of the keys of the row are not
        table columns, they will be created automatically.
During column creation, ``types`` will be checked for a key
matching the name of a column to be created, and the given
SQLAlchemy column type will be used. Otherwise, the type is
guessed from the row value, defaulting to a simple unicode
field.
::
data = dict(title='I am a banana!')
table.insert(data)
"""
self._check_dropped()
if ensure:
self._ensure_columns(row, types=types)
res = self.database.executable.execute(self.table.insert(row))
return res.lastrowid
def insert_many(self, rows, chunk_size=1000, ensure=True, types={}):
"""
Add many rows at a time, which is significantly faster than adding
them one by one. Per default the rows are processed in chunks of
1000 per commit, unless you specify a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows)
"""
def _process_chunk(chunk):
if ensure:
for row in chunk:
self._ensure_columns(row, types=types)
self.table.insert().execute(chunk)
self._check_dropped()
chunk = []
i = 0
for row in rows:
chunk.append(row)
i += 1
if i == chunk_size:
_process_chunk(chunk)
chunk = []
i = 0
if i > 0:
_process_chunk(chunk)
def update(self, row, keys, ensure=True, types={}):
"""
Update a row in the table. The update is managed via
the set of column names stated in ``keys``: they will be
used as filters for the data to be updated, using the values
in ``row``.
::
# update all entries with id matching 10, setting their title columns
data = dict(id=10, title='I am a banana!')
table.update(data, ['id'])
If keys in ``row`` update columns not present in the table,
they will be created based on the settings of ``ensure`` and
``types``, matching the behavior of :py:meth:`insert() <dataset.Table.insert>`.
"""
# check whether keys arg is a string and format as a list
if isinstance(keys, basestring):
keys = [keys]
self._check_dropped()
if not len(keys) or len(keys)==len(row):
return False
clause = [(u, row.get(u)) for u in keys]
"""
Don't update the key itself, so remove any keys from the row dict
"""
for key in keys:
if key in row.keys():
del row[key]
if ensure:
self._ensure_columns(row, types=types)
try:
filters = self._args_to_clause(dict(clause))
stmt = self.table.update(filters, row)
rp = self.database.executable.execute(stmt)
return rp.rowcount > 0
except KeyError:
return False
def upsert(self, row, keys, ensure=True, types={}):
"""
An UPSERT is a smart combination of insert and update. If rows with matching ``keys`` exist
they will be updated, otherwise a new row is inserted in the table.
::
data = dict(id=10, title='I am a banana!')
table.upsert(data, ['id'])
"""
# check whether keys arg is a string and format as a list
if isinstance(keys, basestring):
keys = [keys]
self._check_dropped()
if ensure:
self.create_index(keys)
filters = {}
for key in keys:
filters[key] = row.get(key)
if self.find_one(**filters) is not None:
self.update(row, keys, ensure=ensure, types=types)
else:
self.insert(row, ensure=ensure, types=types)
def delete(self, **_filter):
""" Delete rows from the table. Keyword arguments can be used
to add column-based filters. The filter criterion will always
be equality:
.. code-block:: python
table.delete(place='Berlin')
If no arguments are given, all records are deleted.
"""
self._check_dropped()
if len(_filter) > 0:
q = self._args_to_clause(_filter)
stmt = self.table.delete(q)
else:
stmt = self.table.delete()
self.database.executable.execute(stmt)
def _ensure_columns(self, row, types={}):
for column in set(row.keys()) - set(self.table.columns.keys()):
if column in types:
_type = types[column]
else:
_type = guess_type(row[column])
log.debug("Creating column: %s (%s) on %r" % (column,
_type, self.table.name))
self.create_column(column, _type)
def _args_to_clause(self, args):
self._ensure_columns(args)
clauses = []
for k, v in args.items():
if isinstance(v, list) or isinstance(v, tuple):
clauses.append(self.table.c[k].in_(v))
else:
clauses.append(self.table.c[k] == v)
return and_(*clauses)
def create_column(self, name, type):
"""
        Explicitly create a new column ``name`` of a specified type.
``type`` must be a `SQLAlchemy column type <http://docs.sqlalchemy.org/en/rel_0_8/core/types.html>`_.
::
table.create_column('created_at', sqlalchemy.DateTime)
"""
self._check_dropped()
self.database._acquire()
try:
if name not in self.table.columns.keys():
col = Column(name, type)
col.create(self.table,
connection=self.database.executable)
finally:
self.database._release()
def create_index(self, columns, name=None):
"""
Create an index to speed up queries on a table. If no ``name`` is given a random name is created.
::
table.create_index(['name', 'country'])
"""
self._check_dropped()
if not name:
sig = abs(hash('||'.join(columns)))
name = 'ix_%s_%s' % (self.table.name, sig)
if name in self.indexes:
return self.indexes[name]
try:
self.database._acquire()
columns = [self.table.c[c] for c in columns]
idx = Index(name, *columns)
idx.create(self.database.engine)
except:
idx = None
finally:
self.database._release()
self.indexes[name] = idx
return idx
def find_one(self, **_filter):
"""
Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or None.
::
row = table.find_one(country='United States')
"""
self._check_dropped()
args = self._args_to_clause(_filter)
query = self.table.select(whereclause=args, limit=1)
rp = self.database.executable.execute(query)
return rp.fetchone()
def _args_to_order_by(self, order_by):
if order_by[0] == '-':
return self.table.c[order_by[1:]].desc()
else:
return self.table.c[order_by].asc()
def find(self, _limit=None, _offset=0, _step=5000,
order_by='id', **_filter):
"""
Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
::
results = table.find(country='France')
results = table.find(country='France', year=1980)
Using ``_limit``::
# just return the first 10 rows
results = table.find(country='France', _limit=10)
You can sort the results by single or multiple columns. Append a minus sign
to the column name for descending order::
# sort results by a column 'year'
results = table.find(country='France', order_by='year')
# return all rows sorted by multiple columns (by year in descending order)
results = table.find(order_by=['country', '-year'])
By default :py:meth:`find() <dataset.Table.find>` will break the
query into chunks of ``_step`` rows to prevent huge tables
from being loaded into memory at once.
For more complex queries, please use :py:meth:`db.query() <dataset.Database.query>`
instead."""
self._check_dropped()
if isinstance(order_by, (str, unicode)):
order_by = [order_by]
order_by = filter(lambda o: o in self.table.columns, order_by)
order_by = [self._args_to_order_by(o) for o in order_by]
args = self._args_to_clause(_filter)
# query total number of rows first
count_query = self.table.count(whereclause=args, limit=_limit, offset=_offset)
rp = self.database.executable.execute(count_query)
total_row_count = rp.fetchone()[0]
if _step is None or _step is False or _step == 0:
_step = total_row_count
if total_row_count > _step and len(order_by) == 0:
_step = total_row_count
log.warn("query cannot be broken into smaller sections because it is unordered")
queries = []
for i in count():
qoffset = _offset + (_step * i)
qlimit = _step
if _limit is not None:
qlimit = min(_limit - (_step * i), _step)
if qlimit <= 0:
break
if qoffset > total_row_count:
break
queries.append(self.table.select(whereclause=args, limit=qlimit,
offset=qoffset, order_by=order_by))
return ResultIter((self.database.executable.execute(q) for q in queries))
def __len__(self):
"""
Returns the number of rows in the table.
"""
d = self.database.query(self.table.count()).next()
return d.values().pop()
def distinct(self, *columns, **_filter):
"""
        Returns all rows of a table, but removes rows with duplicate values in ``columns``.
        Internally this creates a `DISTINCT statement <http://www.w3schools.com/sql/sql_distinct.asp>`_.
::
# returns only one row per year, ignoring the rest
table.distinct('year')
# works with multiple columns, too
table.distinct('year', 'country')
# you can also combine this with a filter
table.distinct('year', country='China')
"""
self._check_dropped()
qargs = []
try:
columns = [self.table.c[c] for c in columns]
for col, val in _filter.items():
qargs.append(self.table.c[col] == val)
except KeyError:
return []
q = expression.select(columns, distinct=True,
whereclause=and_(*qargs),
order_by=[c.asc() for c in columns])
return self.database.query(q)
def all(self):
"""
Returns all rows of the table as simple dictionaries. This is simply a shortcut
to *find()* called with no arguments.
::
rows = table.all()"""
return self.find()
def __iter__(self):
"""
        Allows for iterating over all rows in the table without explicitly
calling :py:meth:`all() <dataset.Table.all>`.
::
for row in table:
print row
"""
return self.all()
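# Editor's hedged usage sketch (not part of this module): it goes through the
# public ``dataset.connect`` entry point of the same project and an in-memory
# SQLite database, so nothing here constructs the classes above directly.
def _example_table_usage():
    import dataset
    db = dataset.connect('sqlite:///:memory:')
    table = db['people']
    # insert creates missing columns on the fly because ensure defaults to True
    pk = table.insert(dict(name='Dolly', age=33))
    # update filters on the 'id' column and changes the remaining values
    table.update(dict(id=pk, age=34), ['id'])
    # upsert updates the existing row and adds the new 'country' column
    table.upsert(dict(id=pk, country='US'), ['id'])
    for row in table.find(country='US'):
        print('%s is %s' % (row['name'], row['age']))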
|
mit
| 4,836,438,356,623,720,000
| 33.863049
| 109
| 0.552698
| false
| 4.237437
| false
| false
| false
|
wooga/airflow
|
airflow/providers/apache/hive/sensors/hive_partition.py
|
1
|
3014
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive.
Note: Because ``partition`` supports general logical operators, it
can be inefficient. Consider using NamedHivePartitionSensor instead if
you don't need the full flexibility of HivePartitionSensor.
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table: str
:param partition: The partition clause to wait for. This is passed as
is to the metastore Thrift client ``get_partitions_by_filter`` method,
and apparently supports SQL like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
:type partition: str
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('schema', 'table', 'partition',)
ui_color = '#C5CAE9'
@apply_defaults
def __init__(self,
table, partition="ds='{{ ds }}'",
metastore_conn_id='metastore_default',
schema='default',
poke_interval=60 * 3,
*args,
**kwargs):
super().__init__(
poke_interval=poke_interval, *args, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context):
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.log.info(
'Poking for table %s.%s, partition %s', self.schema, self.table, self.partition
)
if not hasattr(self, 'hook'):
hook = HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
return hook.check_for_partition(
self.schema, self.table, self.partition)
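# Editor's hedged usage sketch (not part of this module): a typical DAG wiring
# for the sensor above; the dag id, schedule and table/partition values are
# placeholders, not taken from the source.
def _example_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id='example_wait_for_hive_partition',
             schedule_interval='@daily',
             start_date=datetime(2020, 1, 1)) as dag:
        HivePartitionSensor(
            task_id='wait_for_daily_partition',
            table='my_database.my_table',
            partition="ds='{{ ds }}'",
            metastore_conn_id='metastore_default',
        )
    return dag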
|
apache-2.0
| 333,490,660,544,972,600
| 39.72973
| 91
| 0.661911
| false
| 4.14011
| false
| false
| false
|
carpedm20/between
|
between/client.py
|
1
|
15097
|
# -*- coding: UTF-8 -*-
"""
between.client
~~~~~~~~~~~~~~
This module contains the client for Between.
"""
import json
import requests
import websocket
from uuid import uuid1
from datetime import datetime
from mimetypes import MimeTypes
from random import random, choice
from .utils import make_url
from .models import Person, Message, Image
from .preloads import sticker_tokens
from .exceptions import LoginError, AuthenticateError, MessageError
class Client(object):
"""A client for the Between.
See http://github.com/carpedm20/between for complete
documentation for the API.
"""
def __init__(self, email, password, debug=True, user_agent=None):
"""A client for the Between
:param email: Between account `email`
:param password: Between account password
import between
client = between.Client(email, password)
"""
self.email = email
self.headers = {'User-Agent': 'python-between/1.0.0'}
self.uuid = str(uuid1())
self.me = None
self.lover = None
self._session = requests.Session()
self._request_id = 0
self.login(email, password)
self.start()
def start(self):
self.get_status()
self.set_device()
self.get_endpoints()
self.authenticate()
def get(self, url, payload=None, is_json=True):
r = self._session.get(make_url(url), params=payload, headers=self.headers)
if is_json:
return json.loads(r.text)
else:
return r.text
def post(self, url, files=None, payload=None, is_json=True):
r = self._session.post(make_url(url), data=payload, headers=self.headers, files=files)
if is_json:
return json.loads(r.text)
else:
return r.text
def delete(self, url, files=None, payload=None, is_json=True):
r = self._session.delete(make_url(url), data=payload, headers=self.headers, files=files)
if is_json:
return json.loads(r.text)
else:
return r.text
def login(self, email, password):
"""Login to Between server
:param email: Between account `email`
:param password: Between account password
"""
payload = {
"email" : email,
"password" : password,
"session[type]" : "S_WINDOWS",
"session[name]" : "carpedm20",
}
j = self.get("/authentication/getAccessTokenV2", payload)
if j.has_key("error"):
raise LoginError(j["error"]["message"])
self.access_token = j["access_token"]
self.account_id = j["account_id"]
self.expires_at = j["expires_at"]
self.relationship_id = j["relationship_id"]
self.session_id = j["session_id"] # account_id + "xxx"
self.user_id = j["user_id"]
self.headers["x-between-authorization"] = self.access_token
def authenticate(self):
payload = {
"name" : "basicAuthenticate",
"body" : {
"access_token" : self.access_token,
"user_agent" : "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Between-PC/0.3.1 Safari/537.36",
"device_uuid" : self.uuid
}
}
j = self._send("/authentication", payload)
if not j["m"]["body"]["success"]:
raise AuthenticateError(j)
payload = {"name" : "get"}
j = self._send("/%s/threads" % self.user_id, payload)
data = j["m"]["body"]["data"][0]
self.thread_id = data["id"]
self.chatroom = data["chatroom"]
self.chatroom_id = data["chatroom_id"]
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "SUBSCRIPTIONS",
"path" : "/subscriptions",
"subscriptionsReq":{
"methodName" : "ADD_V4",
"addV4Req":{
"subscriptions":[
{
"path" : "/%s" % self.thread_id,
"recursive":True
},
{
"path" : "/%s" % self.chatroom_id,
"recursive":True
},
{
"path" : "/%s/push" % self.account_id,
"recursive":True,
"push_priority" : "HIGH"
}
]
}
}
},
{
"objectName" : "CHAT_ROOM",
"path" : "/%s" % self.chatroom_id,
"chatRoomReq":{
"methodName" : "GET"
}
}
]
}
}
j = self._send("/batch", payload)
if not j["m"]["body"]["data"][0]["success"]:
raise AuthenticateError(j)
def send(self, content):
"""Send a message
:param content: message content to send
"""
try:
content = content.decode('utf-8')
except:
pass
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "MESSAGES",
"path" : "/%s/messages" % self.thread_id,
"messagesReq" : {
"methodName" : "ADD",
"addReq" : {
"message" : {
"content" : content
}
}
}
},
{
"objectName" : "CHAT_MEMBER_STATE",
"path" : "/chatMemberState",
"chatMemberStateReq" : {
"methodName" : "EDIT",
"editReq" : {
"state_param" : {
"state" : "ST_ACTIVE"
}
}
}
}
]
}
}
j = self._send("/batch", payload)
#if not j["m"]["body"]["data"][0]["success"]:
# raise MessageError(j)
def send_sticker(self, sticker_id=None):
"""Send a sticker
        :param sticker_id: id of the sticker to send
"""
if not sticker_id:
sticker_id = choice(sticker_tokens.keys())
try:
token = sticker_tokens[sticker_id]
except:
raise MessageError("Don't have sticker token information of %s" % sticker_id)
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "MESSAGES",
"path" : "/%s/messages" % self.thread_id,
"messagesReq" : {
"methodName" : "ADD",
"addReq" : {
"message" : {
"attachments" : [
{
"attachment_type" : "T_STICKER_V2",
"sticker" : {
"sticker_id" : str(sticker_id),
"sticker_token" : token
}
}
]
}
}
}
},
{
"objectName" : "CHAT_MEMBER_STATE",
"path" : "/chatMemberState",
"chatMemberStateReq" : {
"methodName" : "EDIT",
"editReq" : {
"state_param" : {
"state" : "ST_ACTIVE"
}
}
}
}
]
}
}
j = self._send("/batch", payload)
#if not j["m"]["body"]["data"][0]["success"]:
# raise MessageError(j)
def send_image(self, path=None, image_id=None):
"""Send an image
:param path: path of image to upload
"""
if not path and not image_id:
raise MessageError("path or image_id should be passed")
if not image_id:
image_id = self.upload_image(path)._id
payload = {
"name" : "batch",
"body" : {
"requests":[
{
"objectName" : "MESSAGES",
"path" : "/%s/messages" % self.thread_id,
"messagesReq" : {
"methodName" : "ADD",
"addReq" : {
"message" : {
"attachments" : [
{
"attachment_type" : "T_IMAGE",
"reference" : image_id
}
]
}
}
}
},
{
"objectName" : "CHAT_MEMBER_STATE",
"path" : "/chatMemberState",
"chatMemberStateReq" : {
"methodName" : "EDIT",
"editReq" : {
"state_param" : {
"state" : "ST_ACTIVE"
}
}
}
}
]
}
}
j = self._send("/batch", payload)
#if not j["m"]["body"]["data"][0]["success"]:
# raise MessageError(j)
def get_images(self, limit=64):
"""Get uploaded images
:param limit: the maximum number of images
"""
        # A dict cannot carry the "file_types[]" key twice (the second entry
        # would overwrite the first), so the query string is built by hand
        # below instead of passing a params dict.
        #payload = {
        #    "range[limit]" : limit,
        #    "file_types[]" : "FT_IMAGE",
        #    "file_types[]" : "FT_VOUCHER"
        #}
        #j = self.get("/%s/messages/byFileType" % self.thread_id, payload)
url = "/%s/messages/byFileType?range[limit]=%s&file_types[]=FT_IMAGE&file_types[]=FT_VOUCHER" % (self.thread_id, limit)
j = self.get(url)
if j["status"] == "ERROR":
raise MessageError(j)
return j
def get_recent_messages(self, limit=32):
"""Get recent messages
:param limit: the maximum number of messages
"""
payload = {
"name" : "getV4",
"body" : {
"range" : {
"limit" : limit
},
"glimpse" : True
}
}
j = self._send("/%s/messages" % self.thread_id, payload)
recent_messages = []
for message in j["m"]["body"]["data"]:
recent_messages.append(Message(message))
return recent_messages
def mark_read_message(self, message_id):
"""Mark a message as be read
:param message_id: message_id to mark to be read
"""
payload = {
"name" : "readMessages",
"body" : {
"message_id" : message_id
}
}
return self._send("/%s/messages" % self.thread_id, payload)
def _send(self, path, message, c=1, v=1):
"""Send websocket message
:param path: command to execute
:param message: message to send
:param c: (optional) ?
:param v: (optional) ?
"""
message["type"] = "CALL"
payload = {
"c" : c,
"v" : v,
"#" : self._request_id,
"p" : path,
"m" : message
}
msg = str(payload).replace("u'","'").replace("'",'"').replace("True","true")
try:
self._websocket.send(msg)
except:
self.start()
self._websocket.send(msg)
self._request_id += 1
result = self._websocket.recv()
return json.loads(result)
def upload_image(self, path):
"""Upload an image to Between server
:param path: path of file to upload
"""
mime_type = MimeTypes().guess_type(path)[0]
files = {
'file_body': open(path)
}
j = self.post("/%s/files/uploadPhoto" % self.user_id, files=files)
image = Image(j['image'], _id=j['id'])
return image
def get_status(self):
j = self.get("/%s/views/status" % self.relationship_id)
for user in j['users']:
if user['email'] == self.email:
self.me = Person(user)
else:
self.lover = Person(user)
return j
def get_endpoints(self):
j = self.get("/info/endpoints")
self.message_endpoints = j['message']
self._websocket_url = "%s&access_token=%s" % (j['websocket'][0], self.access_token)
self._websocket = websocket.create_connection(self._websocket_url)
return j
def run_forever(self, on_message, on_error=None, on_close=None):
"""Long polling method
:param on_message: method that will executed when message is arrived.
"""
self._websocket_app = websocket.WebSocketApp(self._websocket_url,
on_message = on_message,
on_error = on_error,
on_close = on_close)
self.run_forever_mode = True
self._websocket_app.run_forever()
def set_device(self, os_type="D_WINDOWS"):
payload = {
"type" : os_type
}
j = self.get("/%s/device" % self.session_id, payload)
return j
def delete_session(self):
j = self.delete('/%s/' % self.session_id)
return j
def __del__(self):
j = self.get_status()
j = self.delete_session()
return j['value']
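# Editor's hedged usage sketch (not part of the original module): the credentials
# are placeholders and Client() logs in over the network immediately, so this
# only works against a real Between account.
def _example_between_session(email, password):
    client = Client(email, password)
    client.send('hello from python-between')
    for message in client.get_recent_messages(limit=5):
        print(message)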
|
bsd-3-clause
| -5,605,430,828,047,814,000
| 30.783158
| 139
| 0.400411
| false
| 4.613998
| false
| false
| false
|
nektor211/imgaug
|
tests/check_noise.py
|
1
|
2519
|
from __future__ import print_function, division
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from scipy import misc
import numpy as np
from skimage import data
import cv2
def main():
nb_rows = 8
nb_cols = 8
h, w = (128, 128)
sample_size = 128
noise_gens = [
iap.SimplexNoise(),
iap.FrequencyNoise(exponent=-4, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=-2, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=0, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=2, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=4, size_px_max=sample_size, upscale_method="cubic"),
iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
iap.IterativeNoiseAggregator(
other_param=iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
iterations=(1, 3),
aggregation_method=["max", "avg"]
),
iap.IterativeNoiseAggregator(
other_param=iap.Sigmoid(
iap.FrequencyNoise(exponent=(-4, 4), size_px_max=(4, sample_size), upscale_method=["nearest", "linear", "cubic"]),
threshold=(-10, 10),
activated=0.33,
mul=20,
add=-10
),
iterations=(1, 3),
aggregation_method=["max", "avg"]
)
]
samples = [[] for _ in range(len(noise_gens))]
for _ in range(nb_rows * nb_cols):
for i, noise_gen in enumerate(noise_gens):
samples[i].append(noise_gen.draw_samples((h, w)))
rows = [np.hstack(row) for row in samples]
grid = np.vstack(rows)
misc.imshow((grid*255).astype(np.uint8))
images = [ia.quokka_square(size=(128, 128)) for _ in range(16)]
seqs = [
iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0)),
iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True),
iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0)),
iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), per_channel=True)
]
images_aug = []
for seq in seqs:
images_aug.append(np.hstack(seq.augment_images(images)))
images_aug = np.vstack(images_aug)
misc.imshow(images_aug)
if __name__ == "__main__":
main()
|
mit
| 3,232,499,235,980,668,000
| 37.166667
| 138
| 0.611354
| false
| 3.284224
| false
| false
| false
|
gusnaughton/orbit
|
orbit/pyencoding.py
|
1
|
4918
|
# thrown when there isn't enough data
from _codecs import utf_8_decode
import struct
big_endian, little_endian = range(2)
packEndians = {
big_endian: '>',
little_endian: '<'
}
class ReadException(Exception):
pass
# endian is here for automatic struct.packing purposes
def packByte(b, endian=big_endian):
return struct.pack('%sB' % packEndians[endian], b)
def packSByte(b, endian=big_endian):
	return struct.pack('%sb' % packEndians[endian], b)
def packShort(s, endian=big_endian):
	return struct.pack('%sh' % packEndians[endian], s)
def packUShort(s, endian=big_endian):
return struct.pack('%sH' % packEndians[endian], s)
def packInt(i, endian=big_endian):
return struct.pack('%si' % packEndians[endian], i)
def packUInt(i, endian=big_endian):
return struct.pack('%sI' % packEndians[endian], i)
def packLong(i, endian=big_endian):
return struct.pack('%sq' % packEndians[endian], i)
def packULong(i, endian=big_endian):
return struct.pack('%sQ' % packEndians[endian], i)
# All of the names of the compiled structs on the reader object
# This makes dealing with endians faster
compiled_struct_names = [
('struct_sbyte', '%sb'),
('struct_short', '%sh'),
('struct_ushort', '%sH'),
('struct_int', '%si'),
('struct_uint', '%sI'),
('struct_long', '%sq'),
('struct_ulong', '%sQ'),
('struct_float', '%sf'),
('struct_double', '%sd'),
('struct_bool', '%s?'),
]
class Reader(object):
def __init__(self, data=None, endian=big_endian):
self.index = 0
self.endian = packEndians[endian]
for (struct_name, struct_prototype) in compiled_struct_names:
setattr(self, struct_name, struct.Struct(struct_prototype % self.endian))
if data is None:
self.data = ''
else:
self.data = data
def addData(self, data):
self.data += data
def has(self, c):
return len(self.data) - self.index >= c
def advance(self, c):
self.index += c
def revert(self):
self.index = 0
def commit(self):
self.data = self.data[self.index:]
self.index = 0
def empty(self):
self.data = ''
self.index = 0
def peekByte(self):
if not self.has(1):
raise ReadException()
return ord(self.data[self.index])
def readByte(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return ord(self.data[self.index - 1])
def readSByte(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return self.struct_sbyte.unpack_from(self.data, self.index - 1)[0]
def readChars(self, count):
if not self.has(count):
raise ReadException()
self.advance(count)
return self.data[self.index - count:self.index]
def readBytes(self, count):
if not self.has(count):
raise ReadException()
self.advance(count)
return [ord(x) for x in list(self.data[self.index - count:self.index])]
def readChar(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return chr(self.data[self.index - 1])
def readShort(self):
if not self.has(2):
raise ReadException()
self.advance(2)
return self.struct_short.unpack_from(self.data, self.index - 2)[0]
def readUShort(self):
if not self.has(2):
raise ReadException()
self.advance(2)
return self.struct_ushort.unpack_from(self.data, self.index - 2)[0]
def readInt(self):
if not self.has(4):
raise ReadException()
self.advance(4)
return self.struct_int.unpack_from(self.data, self.index - 4)[0]
def readUInt(self):
if not self.has(4):
raise ReadException()
self.advance(4)
return self.struct_uint.unpack_from(self.data, self.index - 4)[0]
def readLong(self):
if not self.has(8):
raise ReadException()
self.advance(8)
return self.struct_long.unpack_from(self.data, self.index - 8)[0]
def readULong(self):
if not self.has(8):
raise ReadException()
self.advance(8)
return self.struct_ulong.unpack_from(self.data, self.index - 8)[0]
def readFloat(self):
if not self.has(4):
raise ReadException()
self.advance(4)
return self.struct_float.unpack_from(self.data, self.index - 4)[0]
def readDouble(self):
if not self.has(8):
raise ReadException()
self.advance(8)
return self.struct_double.unpack_from(self.data, self.index - 8)[0]
def readBool(self):
if not self.has(1):
raise ReadException()
self.advance(1)
return self.struct_bool.unpack_from(self.data, self.index - 1)[0]
def readCharArray(self, len_func):
if hasattr(len_func, '__call__'):
l = len_func()
else:
l = len_func
return self.readChars(l)
def readArray(self, len_func, data_func, data_len=None):
if hasattr(len_func, '__call__'):
l = len_func()
else:
l = len_func
if data_len is not None and not self.has(l * data_len):
raise ReadException()
ret = []
for i in range(l):
ret.append(data_func())
return ret
def readUTF8(self):
l = self.readUShort()
if not self.has(l):
raise ReadException()
ret = utf_8_decode(self.data[self.index:self.index + l])[0]
self.advance(l)
return ret
|
mit
| -5,072,918,801,631,369,000
| 21.153153
| 76
| 0.672834
| false
| 2.772266
| false
| false
| false
|
TobiasLohner/proSoar
|
prosoar/task/json_writer.py
|
1
|
1481
|
import json
def write_json_task(task):
database = {}
database['type'] = task.type
database['distance'] = task.distance
database['aat_min_time'] = task.aat_min_time
database['start_max_speed'] = task.start_max_speed
database['start_max_height'] = task.start_max_height
database['start_max_height_ref'] = task.start_max_height_ref
database['finish_min_height'] = task.finish_min_height
database['finish_min_height_ref'] = task.finish_min_height_ref
database['fai_finish'] = task.fai_finish
database['min_points'] = task.min_points
database['max_points'] = task.max_points
database['homogeneous_tps'] = task.homogeneous_tps
database['is_closed'] = task.is_closed
database['task_scored'] = task.task_scored
for key, turnpoint in enumerate(task):
database[key] = {'lon': turnpoint.lon,
'lat': turnpoint.lat,
'name': turnpoint.name,
'id': turnpoint.id,
'comment': turnpoint.comment,
'altitude': turnpoint.altitude,
'type': turnpoint.sector.type,
'radius': turnpoint.sector.radius,
'inner_radius': turnpoint.sector.inner_radius,
'start_radial': turnpoint.sector.start_radial,
'end_radial': turnpoint.sector.end_radial}
return json.dumps(database, indent=1)
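# Editor's hedged usage sketch (not part of the original module): 'task' stands
# for any proSoar task object exposing the attributes read above; since the
# writer returns plain JSON text, it round-trips through json.loads.
def _example_round_trip(task):
    parsed = json.loads(write_json_task(task))
    print('%s task over %s' % (parsed['type'], parsed['distance']))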
|
gpl-2.0
| -5,785,154,506,445,901,000
| 41.314286
| 71
| 0.579338
| false
| 3.846753
| false
| false
| false
|
andymckay/zamboni
|
mkt/constants/comm.py
|
1
|
5367
|
from tower import ugettext_lazy as _
# To add a note type:
# - assign it an incremented number (MY_NOTE_TYPE = 42)
# - give it a translation in NOTE_TYPES
# - if adding from amo/log.py, add it to ACTION_MAP
# - add the translation to Commbadge settings
# Faith of the seven.
NO_ACTION = 0
APPROVAL = 1
REJECTION = 2
DISABLED = 3
MORE_INFO_REQUIRED = 4
ESCALATION = 5
REVIEWER_COMMENT = 6
RESUBMISSION = 7
APPROVE_VERSION_PRIVATE = 8
ESCALATION_HIGH_ABUSE = 9
ESCALATION_HIGH_REFUNDS = 10
ESCALATION_CLEARED = 11
REREVIEW_CLEARED = 12
SUBMISSION = 13
DEVELOPER_COMMENT = 14
REVIEW_DEVICE_OVERRIDE = 15
REVIEW_FEATURES_OVERRIDE = 16
REREVIEW_MANIFEST_CHANGE = 17
REREVIEW_MANIFEST_URL_CHANGE = 18
REREVIEW_PREMIUM_TYPE_UPGRADE = 19
REREVIEW_DEVICES_ADDED = 20
REREVIEW_FEATURES_CHANGED = 21
REREVIEW_CONTENT_RATING_ADULT = 22
ESCALATION_VIP_APP = 22
ESCALATION_PRERELEASE_APP = 23
PRIORITY_REVIEW_REQUESTED = 24
ADDITIONAL_REVIEW = 25
NOTE_TYPES = {
NO_ACTION: _('No action'),
APPROVAL: _('Approved'),
REJECTION: _('Rejected'),
DISABLED: _('Banned'),
MORE_INFO_REQUIRED: _('More information requested'),
ESCALATION: _('Escalated'),
REVIEWER_COMMENT: _('Comment'),
RESUBMISSION: _('App resubmission'),
APPROVE_VERSION_PRIVATE: _('Approved but private'),
ESCALATION_CLEARED: _('Escalation cleared'),
ESCALATION_HIGH_ABUSE: _('Escalated due to High Abuse Reports'),
ESCALATION_HIGH_REFUNDS: _('Escalated due to High Refund Requests'),
REREVIEW_CLEARED: _('Re-review cleared'),
SUBMISSION: _('App submission notes'),
DEVELOPER_COMMENT: _('Developer comment'),
REVIEW_DEVICE_OVERRIDE: _('Device(s) changed by reviewer'),
REVIEW_FEATURES_OVERRIDE: _('Requirement(s) changed by reviewer'),
REREVIEW_MANIFEST_CHANGE: _('Rereview due to Manifest Change'),
REREVIEW_MANIFEST_URL_CHANGE: _('Rereview due to Manifest URL Change'),
    REREVIEW_PREMIUM_TYPE_UPGRADE: _('Rereview due to Premium Type Upgrade'),
REREVIEW_DEVICES_ADDED: _('Rereview due to Devices Added'),
REREVIEW_FEATURES_CHANGED: _('Rereview due to Requirements Change'),
REREVIEW_CONTENT_RATING_ADULT: _('Rereview due to Adult Content Rating'),
ESCALATION_VIP_APP: _('Escalation due to VIP App'),
    ESCALATION_PRERELEASE_APP: _('Escalation due to Prerelease App'),
PRIORITY_REVIEW_REQUESTED: _('Priority review requested'),
ADDITIONAL_REVIEW: _('Additional review completed'),
}
# Note types only visible by reviewers and not developers.
REVIEWER_NOTE_TYPES = (
ESCALATION,
REVIEWER_COMMENT,
ESCALATION_HIGH_ABUSE,
ESCALATION_HIGH_REFUNDS,
ESCALATION_CLEARED,
REREVIEW_MANIFEST_CHANGE,
REREVIEW_MANIFEST_URL_CHANGE,
REREVIEW_PREMIUM_TYPE_UPGRADE,
REREVIEW_DEVICES_ADDED,
REREVIEW_FEATURES_CHANGED,
REREVIEW_CONTENT_RATING_ADULT,
ESCALATION_VIP_APP,
ESCALATION_PRERELEASE_APP,
PRIORITY_REVIEW_REQUESTED
)
# Note types that can be created through the API view.
API_NOTE_TYPE_WHITELIST = (
NO_ACTION,
REVIEWER_COMMENT,
DEVELOPER_COMMENT,
)
def U_NOTE_TYPES():
return dict((key, unicode(value)) for (key, value) in
NOTE_TYPES.iteritems())
def ACTION_MAP(activity_action):
"""Maps ActivityLog action ids to Commbadge note types."""
import amo
if isinstance(activity_action, amo._LOG):
activity_action = activity_action.id
return {
amo.LOG.APPROVE_VERSION.id: APPROVAL,
amo.LOG.APPROVE_VERSION_PRIVATE.id: APPROVE_VERSION_PRIVATE,
amo.LOG.APP_DISABLED.id: DISABLED,
amo.LOG.ESCALATE_MANUAL.id: ESCALATION,
amo.LOG.ESCALATE_VERSION.id: ESCALATION,
amo.LOG.ESCALATION_VIP_APP.id: ESCALATION,
amo.LOG.ESCALATED_HIGH_ABUSE.id: ESCALATION_HIGH_ABUSE,
amo.LOG.ESCALATED_HIGH_REFUNDS.id: ESCALATION_HIGH_REFUNDS,
amo.LOG.ESCALATION_CLEARED.id: ESCALATION_CLEARED,
amo.LOG.REQUEST_INFORMATION.id: MORE_INFO_REQUIRED,
amo.LOG.REJECT_VERSION.id: REJECTION,
amo.LOG.REREVIEW_CLEARED.id: REREVIEW_CLEARED,
amo.LOG.WEBAPP_RESUBMIT.id: RESUBMISSION,
amo.LOG.COMMENT_VERSION.id: REVIEWER_COMMENT,
amo.LOG.REVIEW_FEATURES_OVERRIDE.id: REVIEW_FEATURES_OVERRIDE,
amo.LOG.REVIEW_DEVICE_OVERRIDE.id: REVIEW_DEVICE_OVERRIDE,
amo.LOG.REREVIEW_MANIFEST_CHANGE.id: REREVIEW_MANIFEST_CHANGE,
amo.LOG.REREVIEW_MANIFEST_URL_CHANGE.id: REREVIEW_MANIFEST_URL_CHANGE,
amo.LOG.REREVIEW_PREMIUM_TYPE_UPGRADE.id:
REREVIEW_PREMIUM_TYPE_UPGRADE,
amo.LOG.REREVIEW_DEVICES_ADDED.id: REREVIEW_DEVICES_ADDED,
amo.LOG.REREVIEW_FEATURES_CHANGED.id: REREVIEW_FEATURES_CHANGED,
amo.LOG.CONTENT_RATING_TO_ADULT.id:
REREVIEW_CONTENT_RATING_ADULT,
amo.LOG.ESCALATION_VIP_APP.id: ESCALATION_VIP_APP,
amo.LOG.ESCALATION_PRERELEASE_APP.id: ESCALATION_PRERELEASE_APP,
amo.LOG.PRIORITY_REVIEW_REQUESTED.id: PRIORITY_REVIEW_REQUESTED,
amo.LOG.PASS_ADDITIONAL_REVIEW.id: ADDITIONAL_REVIEW,
amo.LOG.FAIL_ADDITIONAL_REVIEW.id: ADDITIONAL_REVIEW,
}.get(activity_action, NO_ACTION)
# Number of days a token is valid for.
THREAD_TOKEN_EXPIRY = 30
# Number of times a token can be used.
MAX_TOKEN_USE_COUNT = 5
MAX_ATTACH = 10
# Prefix of the reply to address in comm emails.
REPLY_TO_PREFIX = 'commreply+'
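# Editor's hedged usage sketch (not part of the original module): looking up the
# translated label for a note type and whether developers may see it, using only
# the constants defined above.
def _example_note_label(note_type=APPROVAL):
    label = NOTE_TYPES.get(note_type, NOTE_TYPES[NO_ACTION])
    developer_visible = note_type not in REVIEWER_NOTE_TYPES
    return unicode(label), developer_visible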
|
bsd-3-clause
| -4,813,002,732,634,742,000
| 35.263514
| 78
| 0.703931
| false
| 3.05812
| false
| false
| false
|
yudingding6197/fin_script
|
debug/bs_record.py
|
1
|
1577
|
#!/usr/bin/env python
# -*- coding:gbk -*-
import sys
import os
import pandas as pd
from openpyxl import Workbook
from openpyxl.reader.excel import load_workbook
# Summarize the trade records under buy_sell; transaction_sum.py one level up is recommended, as it reports more statistics
# Main
pindex = len(sys.argv)
if pindex<2:
	sys.stderr.write("Usage: " + os.path.basename(sys.argv[0]) + " <stock code>\n")
exit(0)
code = sys.argv[1]
path = "../buy_sell/"
file_list = []
filter_item = "A1:I1"
for f in os.listdir(path):
if os.path.isfile(path + f) is False:
continue
file_list.append(f)
c_list = ['date','time','code','name','op','vol','price','amount']
df = pd.DataFrame()
st_date = file_list[0][6:12]
ed_date = file_list[-1][6:12]
print st_date,ed_date
for file in file_list:
dt_str = file[6:12]
if dt_str.isdigit() is False:
print "Invalid file(%s) or date(%s)" % (file, dt_str)
continue
sheet_st = 'table'
wb = load_workbook(path+file)
ws = wb.get_sheet_by_name(sheet_st)
for rx in range(2, ws.max_row+1):
w1 = ws.cell(row = rx, column = 1).value
w2 = ws.cell(row = rx, column = 2).value
w3 = ws.cell(row = rx, column = 3).value
w4 = ws.cell(row = rx, column = 4).value
w5 = ws.cell(row = rx, column = 5).value
w6 = ws.cell(row = rx, column = 6).value
w7 = ws.cell(row = rx, column = 7).value
w2 = "%06d" % (w2)
if w2!=code:
continue
temp_list = [int(dt_str),w1,w2,w3,w4,w5,w6,w7]
df1 = pd.DataFrame([temp_list], columns=c_list)
df = df.append(df1)
#print temp_list
if len(df)>0:
filename = "%s%s_S%s-%s_%s.xlsx" %(path, "trade/", code, st_date, ed_date)
df.to_excel(filename)
|
gpl-2.0
| -564,167,224,171,468,740
| 26.189655
| 75
| 0.636652
| false
| 2.125337
| false
| false
| false
|
JeffDestroyerOfWorlds/hydro_examples
|
advection/advection.py
|
1
|
11152
|
"""
2nd-order accurate finite-volume implementation of linear advection with
piecewise linear slope reconstruction.
We are solving a_t + u a_x = 0
This script defines two classes:
-- the Grid1d class that manages a cell-centered grid and holds the
data that lives on that grid
-- the Simulation class that is built on a Grid1d object and defines
everything needed to do a advection.
Options for several different slope limiters are provided.
M. Zingale
"""
import numpy
import pylab
import math
# helper functions for the limiting
def minmod(a, b):
if (abs(a) < abs(b) and a*b > 0.0):
return a
elif (abs(b) < abs(a) and a*b > 0.0):
return b
else:
return 0.0
def maxmod(a, b):
if (abs(a) > abs(b) and a*b > 0.0):
return a
elif (abs(b) > abs(a) and a*b > 0.0):
return b
else:
return 0.0
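# Editor's hedged illustration (not part of the original script): minmod keeps
# the smaller-magnitude slope when both arguments share a sign and returns zero
# across an extremum, which is what suppresses the oscillations of the
# unlimited centered slope; maxmod keeps the larger magnitude instead.
def _limiter_examples():
    assert minmod(1.0, 3.0) == 1.0
    assert minmod(-1.0, 2.0) == 0.0
    assert maxmod(1.0, 3.0) == 3.0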
class Grid1d:
def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
self.ng = ng
self.nx = nx
self.xmin = xmin
self.xmax = xmax
        # python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx
# storage for the solution
self.a = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def scratch_array(self):
""" return a scratch array dimensioned for our grid """
return numpy.zeros((self.nx+2*self.ng), dtype=numpy.float64)
def fill_BCs(self):
""" fill all single ghostcell with periodic boundary conditions """
n = 0
while n < self.ng:
# left boundary
self.a[self.ilo-1-n] = self.a[self.ihi-n]
# right boundary
self.a[self.ihi+1+n] = self.a[self.ilo+n]
n += 1
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return numpy.sqrt(self.dx*numpy.sum(e[self.ilo:self.ihi+1]**2))
class Simulation:
def __init__(self, grid, u, C=0.8, slope_type="centered"):
self.grid = grid
self.t = 0.0 # simulation time
self.u = u # the constant advective velocity
self.C = C # CFL number
self.slope_type = slope_type
def init_cond(self, type="tophat"):
""" initialize the data """
if type == "tophat":
self.grid.a[:] = 0.0
self.grid.a[numpy.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)] = 1.0
elif type == "sine":
self.grid.a[:] = numpy.sin(2.0*math.pi*self.grid.x/(self.grid.xmax-self.grid.xmin))
elif type == "gaussian":
self.grid.a[:] = 1.0 + numpy.exp(-60.0*(self.grid.x - 0.5)**2)
def timestep(self):
""" return the advective timestep """
return self.C*self.grid.dx/self.u
def period(self):
""" return the period for advection with velocity u """
return (self.grid.xmax - self.grid.xmin)/self.u
def states(self, dt):
""" compute the left and right interface states """
# compute the piecewise linear slopes
g = self.grid
slope = g.scratch_array()
g = self.grid
if self.slope_type == "godunov":
# piecewise constant = 0 slopes
slope[:] = 0.0
elif self.slope_type == "centered":
# unlimited centered difference slopes
i = g.ilo-1
while i <= g.ihi+1:
slope[i] = 0.5*(g.a[i+1] - g.a[i-1])/g.dx
i += 1
elif self.slope_type == "minmod":
# minmod limited slope
i = g.ilo-1
while i <= g.ihi+1:
slope[i] = minmod( (g.a[i] - g.a[i-1])/g.dx,
(g.a[i+1] - g.a[i])/g.dx )
i += 1
elif self.slope_type == "MC":
# MC limiter
i = g.ilo-1
while i <= g.ihi+1:
slope[i] = minmod(minmod( 2.0*(g.a[i] - g.a[i-1])/g.dx,
2.0*(g.a[i+1] - g.a[i])/g.dx ),
0.5*(g.a[i+1] - g.a[i-1])/g.dx)
i += 1
elif self.slope_type == "superbee":
# superbee limiter
i = g.ilo-1
while i <= g.ihi+1:
A = minmod( (g.a[i+1] - g.a[i])/g.dx,
2.0*(g.a[i] - g.a[i-1])/g.dx )
B = minmod( (g.a[i] - g.a[i-1])/g.dx,
2.0*(g.a[i+1] - g.a[i])/g.dx )
slope[i] = maxmod(A, B)
i += 1
# loop over all the interfaces. Here, i refers to the left
        # interface of the zone. Note that there is one more interface
        # than there are zones
al = g.scratch_array()
ar = g.scratch_array()
i = g.ilo
while i <= g.ihi+1:
# left state on the current interface comes from zone i-1
            al[i] = g.a[i-1] + 0.5*g.dx*(1.0 - self.u*dt/g.dx)*slope[i-1]
            # right state on the current interface comes from zone i
            ar[i] = g.a[i] - 0.5*g.dx*(1.0 + self.u*dt/g.dx)*slope[i]
i += 1
return al, ar
def riemann(self, al, ar):
"""
Riemann problem for advection -- this is simply upwinding,
but we return the flux
"""
if self.u > 0.0:
return self.u*al
else:
return self.u*ar
def update(self, dt, flux):
""" conservative update """
g = self.grid
anew = g.scratch_array()
anew[g.ilo:g.ihi+1] = g.a[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return anew
def evolve(self, num_periods=1):
""" evolve the linear advection equation """
self.t = 0.0
g = self.grid
tmax = num_periods*self.period()
# main evolution loop
while (self.t < tmax):
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep()
if (self.t + dt > tmax):
dt = tmax - self.t
# get the interface states
al, ar = self.states(dt)
# solve the Riemann problem at all interfaces
flux = self.riemann(al, ar)
# do the conservative update
anew = self.update(dt, flux)
g.a[:] = anew[:]
self.t += dt
if __name__ == "__main__":
#-------------------------------------------------------------------------
# compare limiting and no-limiting
xmin = 0.0
xmax = 1.0
nx = 64
ng = 2
g = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = Simulation(g, u, C=0.7, slope_type="centered")
s.init_cond("tophat")
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
color="r", label="unlimited")
s = Simulation(g, u, C=0.7, slope_type="minmod")
s.init_cond("tophat")
s.evolve(num_periods=5)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
color="b", label="minmod limiter")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1],
ls=":", color="0.5", label="exact")
pylab.legend(frameon=False, loc="best")
pylab.xlabel(r"$x$")
pylab.ylabel(r"$a$")
pylab.savefig("fv-advect.eps")
#-------------------------------------------------------------------------
# convergence test
problem = "gaussian"
xmin = 0.0
xmax = 1.0
ng = 2
N = [32, 64, 128, 256, 512]
err = []
for nx in N:
g = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = Simulation(g, u, C=0.8, slope_type="centered")
s.init_cond("gaussian")
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
# compute the error
err.append(g.norm(g.a - ainit))
print g.dx, nx, err[-1]
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
pylab.scatter(N, err, color="r")
pylab.plot(N, err[len(N)-1]*(N[len(N)-1]/N)**2,
color="k", label=r"$\mathcal{O}(\Delta x^2)$")
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.xlabel("N")
pylab.ylabel(r"$\|\| a^\mathrm{final} - a^\mathrm{init} \|\|_2$",
fontsize=16)
pylab.legend(frameon=False)
pylab.savefig("plm-converge.png")
#-------------------------------------------------------------------------
# different limiters: run both the Gaussian and tophat
xmin = 0.0
xmax = 1.0
nx = 128
ng = 2
u = 1.0
g= Grid1d(nx, ng, xmin=xmin, xmax=xmax)
for p in ["gaussian", "tophat"]:
pylab.clf()
s = Simulation(g, u, C=0.8, slope_type="godunov")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(231)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("piecewise constant")
s = Simulation(g, u, C=0.8, slope_type="centered")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(232)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("centered (unlimited)")
s = Simulation(g, u, C=0.8, slope_type="minmod")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(233)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("minmod limiter")
s = Simulation(g, u, C=0.8, slope_type="MC")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(234)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("MC limiter")
s = Simulation(g, u, C=0.8, slope_type="superbee")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
pylab.subplot(235)
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.title("superbee limiter")
f = pylab.gcf()
f.set_size_inches(10.0,7.0)
pylab.tight_layout()
pylab.savefig("fv-{}-limiters.png".format(p), bbox_inches="tight")
|
bsd-3-clause
| -5,731,016,496,385,529,000
| 24.060674
| 95
| 0.492019
| false
| 3.038692
| false
| false
| false
|
nprapps/elections14
|
app.py
|
1
|
9458
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import copy
import json
import argparse
from flask import Flask, render_template
import app_config
import app_utils
from app_utils import get_last_updated
from render_utils import make_context, smarty_filter, urlencode_filter
import slides
import static_app
import static_theme
app = Flask(__name__)
app.jinja_env.filters['smarty'] = smarty_filter
app.jinja_env.filters['urlencode'] = urlencode_filter
@app.template_filter()
def format_board_time(dt):
"""
Format a time for the big board
"""
if not dt:
return ''
return '{d:%l}:{d.minute:02}'.format(d=dt) + ' EST'
@app.template_filter()
def format_percent(num):
"""
Format a percentage
"""
return int(round(num))
@app.template_filter()
def format_precincts_percent(num):
"""
Format a percentage for precincts reporting
"""
if num > 0 and num < 1:
return '<1'
if num > 99 and num < 100:
return '>99'
else:
return int(round(num))
@app.template_filter()
def signed(num):
"""
Add sign to number (e.g. +1, -1)
"""
return '{0:+d}'.format(num)
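# Editor's hedged illustration (not part of the original app): the filters above
# stay plain callables after registration on the Jinja environment, so their
# behaviour can be checked directly; templates would write e.g.
# {{ 0.4|format_precincts_percent }} or {{ 3|signed }}.
def _example_filters():
    assert format_percent(49.6) == 50
    assert format_precincts_percent(0.4) == '<1'
    assert format_precincts_percent(99.4) == '>99'
    assert signed(3) == '+3'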
@app.route('/')
def index():
"""
Example view demonstrating rendering a simple HTML page.
"""
from models import Race
context = make_context()
with open('data/featured.json') as f:
context['featured'] = json.load(f)
context['races'] = Race.select()
"""
Balance of Power data
"""
races = Race.select().where(Race.office_name == 'U.S. Senate').order_by(Race.state_postal)
context['not_called'] = app_utils.calculate_seats_left(races)
if app_config.DEPLOY_PROMO:
template_file = 'promo.html'
else:
template_file = 'index.html'
return render_template(template_file, **context), 200,
@app.route('/promo/')
def promo():
"""
Test promo template.
"""
return render_template('promo.html', **make_context())
@app.route('/board/<slug>/')
def _big_board(slug):
"""
Preview a slide outside of the stack.
"""
context = make_context()
context['body'] = _slide(slug).data
if slug == 'senate-big-board':
title = 'U.S. Senate'
elif slug == 'house-big-board-one':
title = 'U.S. House 1'
elif slug == 'house-big-board-two':
title = 'U.S. House 2'
elif slug == 'governor-big-board':
title = 'Governors'
    elif slug == 'ballot-measures-big-board':
        title = 'Ballot measures'
    else:
        # Fall back to the raw slug so an unrecognized board doesn't raise NameError.
        title = slug
context['title'] = title
return render_template('_big_board_wrapper.html', **context)
@app.route('/bop.html')
@app_utils.cors
def _bop():
"""
Serve the most recent bop data
"""
from models import Race
context = make_context()
races = Race.select().where(Race.office_name == 'U.S. Senate').order_by(Race.state_postal)
context['bop'] = app_utils.calculate_bop(races, app_utils.SENATE_INITIAL_BOP)
context['not_called'] = app_utils.calculate_seats_left(races)
return render_template('bop.html', **context)
@app.route('/live-data/stack.json')
@app_utils.cors
def _stack_json():
"""
Serve up the current slide stack.
"""
from models import SlideSequence
data = SlideSequence.stack()
# There is one state slug to manipulate in the stack, but the client
# should see two
for i, d in enumerate(data):
if d['slug'] == 'state-house-results':
one = copy(d)
one['slug'] = 'state-house-results-1'
two = copy(d)
two['slug'] = 'state-house-results-2'
data[i:i + 1] = [
one,
two
]
break
js = json.dumps(data)
return js, 200, { 'Content-Type': 'application/javascript' }
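# A small sketch of the transformation above, with toy data (not the live stack):
#
#     data = [{'slug': 'senate-big-board'}, {'slug': 'state-house-results'}]
#     # after the loop, data becomes:
#     # [{'slug': 'senate-big-board'},
#     #  {'slug': 'state-house-results-1'},
#     #  {'slug': 'state-house-results-2'}]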
@app.route('/preview/state-house-results/index.html')
@app.route('/preview/state-senate-results/index.html')
def _state_picker_preview():
"""
Preview a state slide outside of the stack.
"""
context = make_context()
return render_template('_state_picker_preview.html', **context)
@app.route('/preview/state-house-results-<string:slug>-<int:page>/index.html')
@app_utils.cors
def _state_house_slide_preview(slug, page):
"""
Preview a state slide outside of the stack.
"""
context = make_context()
context['body'] = _state_house_slide(slug, page).data
return render_template('slide_preview.html', **context)
@app.route('/preview/state-senate-results-<slug>/index.html')
@app_utils.cors
def _state_senate_slide_preview(slug):
"""
Preview a state slide outside of the stack.
"""
context = make_context()
resp = _state_senate_slide(slug)
if resp.status_code == 200:
context['body'] = resp.data
return render_template('slide_preview.html', **context)
else:
return "404", 404
@app.route('/preview/<slug>/index.html')
@app_utils.cors
def _slide_preview(slug):
"""
Preview a slide outside of the stack.
"""
from models import SlideSequence
context = make_context()
sequence = SlideSequence.select()
for slide in sequence:
if slide.slide.slug == slug:
context['in_sequence'] = True
previous_slide_order = slide.order - 1
next_slide_order = slide.order + 1
break
try:
context['previous_slide'] = SlideSequence.get(SlideSequence.order == previous_slide_order).slide.slug
except:
pass
try:
context['next_slide'] = SlideSequence.get(SlideSequence.order == next_slide_order).slide.slug
except:
pass
context['body'] = _slide(slug).data.decode('utf-8')
context['slug'] = slug
return render_template('slide_preview.html', **context)
@app.route('/slides/state-house-results-<string:slug>-<int:page>.html')
@app_utils.cors
def _state_house_slide(slug, page):
"""
Serve a state slide.
"""
from models import Race, Slide
slide = Slide.get(Slide.slug == 'state-house-results')
slug = slug.upper()
races = Race.select().where(
(Race.office_name == 'U.S. House') &
(Race.state_postal == slug)
).order_by(Race.seat_number)
timestamp = get_last_updated(races)
context = make_context(timestamp=timestamp)
context['slide_class'] = 'state-house'
context['state_postal'] = slug
context['state_name'] = app_config.STATES.get(slug)
# Calculate BOP using all races
context.update(app_utils.calculate_state_bop(races))
# Filter to display races
races = races.where(Race.featured_race == True)
if slug in app_config.PAGINATED_STATES:
race_count = races.count()
page_size = race_count / 2
if page == 1:
races = races.limit(page_size)
elif page == 2:
races = races.offset(page_size)
context['page'] = page
if races.count():
context['time_on_screen'] = slide.time_on_screen
context['races'] = [race for race in races]
context['body'] = render_template('slides/state_house.html', **context)
return render_template('_slide.html', **context)
else:
return "no races", 404
@app.route('/slides/state-senate-results-<slug>.html')
@app_utils.cors
def _state_senate_slide(slug):
"""
Serve a state slide.
"""
from models import Race, Slide
slide = Slide.get(Slide.slug == 'state-senate-results')
slug = slug.upper()
senate_races = Race.select().where(
(Race.office_name == 'U.S. Senate') &
(Race.state_postal == slug)
).order_by(Race.seat_number)
governor_races = Race.select().where(
(Race.office_name == 'Governor') &
(Race.state_postal == slug)
)
if senate_races.count() == 0 and governor_races.count() == 0:
return "404", 404
senate_updated = get_last_updated(senate_races)
governor_updated = get_last_updated(governor_races)
if senate_updated > governor_updated:
timestamp = senate_updated
else:
timestamp = governor_updated
context = make_context(timestamp=timestamp)
context['state_postal'] = slug
context['state_name'] = app_config.STATES.get(slug)
context['slide_class'] = 'state-senate'
context['senate'] = senate_races
context['governor'] = governor_races
context['time_on_screen'] = slide.time_on_screen
context['body'] = render_template('slides/state_senate.html', **context)
return render_template('_slide.html', **context)
@app.route('/slides/<slug>.html')
@app_utils.cors
def _slide(slug):
"""
Serve up slide html fragment
"""
from models import Slide
context = make_context()
slide = Slide.get(Slide.slug == slug)
view_name = slide.view_name
if slide.data:
context['body'] = slides.__dict__[view_name](slide.data)
else:
context['body'] = slides.__dict__[view_name]()
context['slide_class'] = view_name.replace('_', '-')
context['time_on_screen'] = slide.time_on_screen
return render_template('_slide.html', **context)
app.register_blueprint(static_app.static_app)
app.register_blueprint(static_theme.theme)
# Boilerplate
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port')
args = parser.parse_args()
server_port = 8000
if args.port:
server_port = int(args.port)
app.run(host='0.0.0.0', port=server_port, debug=app_config.DEBUG)
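# Usage sketch: the development server honors the optional -p/--port flag,
# e.g. `python app.py -p 8001`; without it, the app listens on port 8000.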
|
mit
| 3,996,981,375,998,131,000
| 24.771117
| 109
| 0.617467
| false
| 3.350337
| true
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/aio/operations/_incident_comments_operations.py
|
1
|
14785
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IncidentCommentsOperations:
"""IncidentCommentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.securityinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_incident(
self,
resource_group_name: str,
workspace_name: str,
incident_id: str,
filter: Optional[str] = None,
orderby: Optional[str] = None,
top: Optional[int] = None,
skip_token: Optional[str] = None,
**kwargs
) -> AsyncIterable["models.IncidentCommentList"]:
"""Gets all incident comments.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param filter: Filters the results, based on a Boolean condition. Optional.
:type filter: str
:param orderby: Sorts the results. Optional.
:type orderby: str
:param top: Returns only the first n results. Optional.
:type top: int
:param skip_token: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls. Optional.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IncidentCommentList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.securityinsight.models.IncidentCommentList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentCommentList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_incident.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IncidentCommentList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_incident.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments'} # type: ignore
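    # Usage sketch (the client object and its attribute name are assumed, not
    # defined in this file): the call returns an AsyncItemPaged, so pages are
    # fetched lazily while iterating with `async for`.
    #
    #     async for comment in client.incident_comments.list_by_incident(
    #             resource_group_name="my-rg",
    #             workspace_name="my-workspace",
    #             incident_id="00000000-0000-0000-0000-000000000000"):
    #         print(comment.message)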
async def get(
self,
resource_group_name: str,
workspace_name: str,
incident_id: str,
incident_comment_id: str,
**kwargs
) -> "models.IncidentComment":
"""Gets an incident comment.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param incident_comment_id: Incident comment ID.
:type incident_comment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IncidentComment, or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.IncidentComment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentComment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
'incidentCommentId': self._serialize.url("incident_comment_id", incident_comment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IncidentComment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}'} # type: ignore
async def create_comment(
self,
resource_group_name: str,
workspace_name: str,
incident_id: str,
incident_comment_id: str,
message: Optional[str] = None,
**kwargs
) -> "models.IncidentComment":
"""Creates the incident comment.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param incident_comment_id: Incident comment ID.
:type incident_comment_id: str
:param message: The comment message.
:type message: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IncidentComment, or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.IncidentComment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentComment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_incident_comment = models.IncidentComment(message=message)
api_version = "2020-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_comment.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
'incidentCommentId': self._serialize.url("incident_comment_id", incident_comment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_incident_comment, 'IncidentComment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IncidentComment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_comment.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}'} # type: ignore
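    # Usage sketch (placeholder values; the client object is assumed): awaiting
    # the coroutine returns the deserialized IncidentComment on a 201 response.
    #
    #     comment = await client.incident_comments.create_comment(
    #         resource_group_name="my-rg",
    #         workspace_name="my-workspace",
    #         incident_id="00000000-0000-0000-0000-000000000000",
    #         incident_comment_id="11111111-1111-1111-1111-111111111111",
    #         message="Investigated; closing as a false positive.")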
|
mit
| 862,310,406,738,324,700
| 50.515679
| 279
| 0.642611
| false
| 4.178915
| true
| false
| false
|
rancavil/python-oauth2
|
oauth2/__init__.py
|
1
|
29076
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
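    # Round-trip sketch (placeholder values): the urlencoded form produced by
    # to_string() is accepted back by from_string(); parameter order may vary.
    #
    #     token = Token('req-key', 'req-secret')
    #     s = token.to_string()            # e.g. 'oauth_token_secret=req-secret&oauth_token=req-key'
    #     restored = Token.from_string(s)
    #     assert restored.key == 'req-key' and restored.secret == 'req-secret'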
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url.encode('utf-8'))
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k.encode('utf-8'), []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
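        # Example (sketch), for a request whose URL carries no query string and
        # whose parameters are {'oauth_nonce': '4572616e', 'b': 'x y'}:
        # the normalized result is 'b=x%20y&oauth_nonce=4572616e' -- keys sorted,
        # values escaped, and spaces encoded as %20 rather than '+'.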
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
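# Minimal usage sketch (placeholder credentials): sign and send a request with
# the default HMAC-SHA1 signature method and no access token.
#
#     consumer = Consumer(key='my-app-key', secret='my-app-secret')
#     client = Client(consumer)
#     resp, content = client.request('https://api.example.com/photos', 'GET')
#
# Client.request() builds a Request from the consumer (and token, if given),
# signs it, and then defers to httplib2.Http.request() to perform the call.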
|
mit
| 3,934,429,948,027,602,400
| 32.809302
| 265
| 0.609575
| false
| 4.251499
| false
| false
| false
|
auready/django
|
django/db/models/query.py
|
1
|
69244
|
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F
from django.db.models.fields import AutoField
from django.db.models.functions import Trunc
from django.db.models.query_utils import InvalidQuery, Q
from django.db.models.sql.constants import CURSOR
from django.utils import timezone
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet:
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
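    # Sketch of the slicing behavior implemented above (Entry is a hypothetical model):
    #
    #     qs = Entry.objects.all()
    #     first_five = qs[:5]    # clones the QuerySet and sets LIMIT 5; still lazy
    #     third = qs[2]          # clones, sets LIMIT/OFFSET, and evaluates immediately
    #     stepped = qs[:10:2]    # a slice with a step evaluates and returns a list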
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self, chunked_fetch=True))
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
        over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size)
if connection.features.can_return_ids_from_bulk_insert:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
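    # Illustrative usage sketch (hypothetical `Entry` model):
    #
    #   Entry.objects.bulk_create(
    #       [Entry(headline="A"), Entry(headline="B")],
    #       batch_size=100,
    #   )
    #
    # As the docstring above notes, save() is not called and pre/post save
    # signals are not sent for the inserted rows.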
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
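    # Illustrative usage sketch (hypothetical `Person` model and fields):
    #
    #   person, created = Person.objects.get_or_create(
    #       email="x@example.com", defaults={"name": "X"})
    #   person, created = Person.objects.update_or_create(
    #       email="x@example.com", defaults={"name": "New name"})
    #
    # The lookup kwargs select the row; `defaults` is only applied when
    # creating (get_or_create) or creating/updating (update_or_create).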
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
if param != 'pk': # It's okay to use a model's pk property.
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
def _values(self, *fields, **expressions):
clone = self._clone()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False):
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
_fields = []
expressions = {}
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id = str(id(field))
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
return clone
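    # Illustrative usage sketch (hypothetical `Entry` model):
    #
    #   Entry.objects.values_list('id', 'headline')  # rows iterate as tuples
    #   Entry.objects.values_list('id', flat=True)   # rows iterate as plain values
    #
    # flat=True is only valid with a single field, as enforced above.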
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
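    # Illustrative usage sketch (hypothetical `Entry` model with a `pub_date`
    # DateTimeField):
    #
    #   Entry.objects.dates('pub_date', 'month')             # date objects
    #   Entry.objects.datetimes('pub_date', 'hour', 'DESC')  # datetime objects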
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._clone()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
return obj
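    # Illustrative usage sketch: select_for_update() only takes effect when the
    # queryset is evaluated inside a transaction (hypothetical `Account` model):
    #
    #   with transaction.atomic():
    #       account = Account.objects.select_for_update().get(pk=1)
    #       account.balance -= 10
    #       account.save()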
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
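    # Illustrative usage sketch (hypothetical `Author`/`Book` models; Prefetch
    # is the class defined later in this module):
    #
    #   Author.objects.prefetch_related('books')
    #   Author.objects.prefetch_related(
    #       Prefetch('books',
    #                queryset=Book.objects.filter(published=True),
    #                to_attr='published_books'))
    #
    # Passing None clears previously registered lookups, as handled above.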
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
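    # Illustrative usage sketch (hypothetical `Author` model; Count is the
    # standard django.db.models aggregate):
    #
    #   Author.objects.annotate(num_books=Count('books'))
    #   Author.objects.annotate(Count('books'))  # default alias: 'books__count'
    #
    # An alias that clashes with a model field raises ValueError, per the
    # check above.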
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
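    # Illustrative usage sketch (hypothetical `Entry` model):
    #
    #   Entry.objects.defer('body')      # load 'body' lazily, on first access
    #   Entry.objects.only('headline')   # load only 'headline' (plus the pk)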
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        A helper method for bulk_create() that inserts the given objects into
        the database one batch of ``batch_size`` at a time, collecting any
        returned primary keys when the backend supports it.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if connections[self.db].features.can_return_ids_from_bulk_insert:
inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True)
if isinstance(inserted_id, list):
inserted_ids.extend(inserted_id)
else:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db)
return inserted_ids
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare_as_filter_value(self):
if self._fields is None:
queryset = self.values('pk')
queryset.query._forced_pk = True
else:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
if len(self._fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
queryset = self._clone()
return queryset.query.as_subquery_filter(queryset._db)
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
isn't equivalent for checking if all objects are present in results,
for example qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
        A list of column names (adjusted by any translations) in the order
        they'll appear in the query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = columns.index(query_name)
columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return columns
@cached_property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
converter = connections[self.db].introspection.table_name_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and queryset._iterable_class is not ModelIterable:
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._clone(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if len(model_instances) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
    Helper function for prefetch_related_objects().
    Runs prefetches on all instances using the prefetcher object,
    assigning the results to the relevant caches on each instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to the first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
bsd-3-clause
| 2,804,857,369,781,657,600
| 39.37551
| 115
| 0.589784
| false
| 4.373397
| false
| false
| false
|
VerTiGoEtrex/spideTor
|
spideTor/Metafile.py
|
1
|
7762
|
# coding: utf-8
'''
Created on Jul 24, 2014
@author: Noah
'''
import bencode
import logging
import pprint
import os.path
log = logging.getLogger(__name__)
pp = pprint.PrettyPrinter(indent = 1, width = 80)
HASHLEN = 20
class Metafile:
'''
    Decodes the metadata stored in a .torrent metafile and presents a standard interface for accessing it.
    Note on the bencode library: everything is returned as UTF-8 encoded byte strings, so values must be .decode()d to get unicode back.
'''
def __init__(self, metafilepath):
self.metafilepath = metafilepath
with open(metafilepath, 'rb') as metafile:
encoded = metafile.read()
log.debug("Read metafile successfully")
log.debug("decoding bencoded data")
self.decoded = bencode.bdecode(encoded)
log.debug("decoded as {}".format(pp.pformat(self.decoded)))
if self.isSingleFileTorrent():
log.debug("metafile appears to be a single file metafile")
else:
log.debug("metafile appears to contain many files")
self.files = None
def __unicode__(self):
return self.metafilepath
def __str__(self):
return self.__unicode__().encode("utf-8")
def getMetafileFiles(self):
        if self.files is not None:
return self.files
self.files = []
if self.isSingleFileTorrent():
self.files.append(MetafileFile(self.getName(), self.decoded['info']['length']))
else:
for metadata in self.decoded['info']['files']:
self.files.append(MetafileFile(os.path.join(*(path.decode("utf-8") for path in metadata['path'])), metadata['length']))
return self.files
def getPieces(self):
hashes = self.getHashes()
log.debug("Number of pieces: {}".format(len(hashes)))
pieceLength = self.decoded['info']['piece length']
log.debug("Piece length: {}".format(pieceLength))
pieces = []
# Populate all of the constant-length pieces
metafileFiles = self.getMetafileFiles()
fileIterator = iter(metafileFiles)
currentFile = fileIterator.next()
currentPiecePosition = 0
currentFileReadPosition = 0
prevPiece = None
for pieceNumber in xrange(0, len(hashes)):
# Get files in this piece (similar to a merge)
# This is a list, because ordering matters
filesInPiece = []
# If this file ends inside this piece, then advance to the next one and add it too
            # Piece ------XXXXX-----
            # File --XXXXXX++++----
            # AND ALSO
            # Piece ------XXXXX-----
            # File --XXXXXXXXX+++--
bytesReadInPiece = 0
while currentPiecePosition + currentFile.getSize() <= (pieceNumber + 1) * pieceLength:
currentPiecePosition += currentFile.getSize()
bytesRemainingInFile = currentFile.getSize() - currentFileReadPosition
filesInPiece.append(MetafileFileWithOffset(currentFile, currentFileReadPosition, bytesRemainingInFile, (currentFileReadPosition == 0)))
bytesReadInPiece += bytesRemainingInFile
currentFileReadPosition = 0
try:
currentFile = fileIterator.next()
                except StopIteration:
# That was the last file. This should be the last piece, which is asserted later.
currentFile = None
break
            if currentFile is not None:
bytesToRead = min(pieceLength - bytesReadInPiece, currentFile.getSize() - currentFileReadPosition)
filesInPiece.append(MetafileFileWithOffset(currentFile, currentFileReadPosition, bytesToRead, False))
currentFileReadPosition += bytesToRead
            elif pieceNumber != len(hashes) - 1 or len(filesInPiece) == 0:  # Assert that this is the last piece
log.error("Ran out of files on piece {} / {}".format(pieceNumber, len(hashes)-1))
return
log.debug("Piece [{}/{}]: {} files".format(pieceNumber, len(hashes)-1, len(filesInPiece)))
pieceToInsert = Piece(pieceNumber, hashes[pieceNumber], pieceLength, filesInPiece)
# Setup linked list (for heapq updating)
pieceToInsert.setPrevPiece(prevPiece)
            if prevPiece is not None:
prevPiece.setNextPiece(pieceToInsert)
pieces.append(pieceToInsert)
prevPiece = pieceToInsert
return pieces
def getHashes(self):
allHashes = self.decoded['info']['pieces']
return [allHashes[window:window+HASHLEN] for window in xrange(0, len(allHashes), HASHLEN)]
def getName(self):
return self.decoded['info']['name'].decode("utf-8")
def isSingleFileTorrent(self):
return 'length' in self.decoded['info']
def getMetafilePath(self):
return self.metafilepath
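    # Illustrative usage sketch (the path is hypothetical):
    #
    #   metafile = Metafile(u"/tmp/example.torrent")
    #   for piece in metafile.getPieces():
    #       print piece.getPieceNumber(), len(piece.getFileWithOffsetsInPiece())
    #
    # getPieces() walks the files in torrent order and maps every fixed-size
    # piece to the file ranges (MetafileFileWithOffset) it covers.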
class Piece:
'''
Holds information about a "piece" in the metafile
'''
def __init__(self, pieceNumber, pieceHash, pieceLength, fileWithOffsetsInPiece):
self.pieceNumber = pieceNumber
self.pieceHash = pieceHash
self.pieceLength = pieceLength
self.fileWithOffsetsInPiece = fileWithOffsetsInPiece
self.nextPiece = None
self.prevPiece = None
def getPieceNumber(self):
return self.pieceNumber
def getFileWithOffsetsInPiece(self):
return self.fileWithOffsetsInPiece
def getHash(self):
return self.pieceHash
def getPieceLength(self):
return self.pieceLength
def oneFileInPiece(self):
return len(self.fileWithOffsetsInPiece) == 1
def getOneFileInPiece(self):
if self.oneFileInPiece():
return next(iter(self.fileWithOffsetsInPiece))
def setPrevPiece(self, prevPiece):
self.prevPiece = prevPiece
def setNextPiece(self, nextPiece):
self.nextPiece = nextPiece
def getPrevPiece(self):
return self.prevPiece
def getNextPiece(self):
return self.nextPiece
class MetafileFile:
'''
Holds more detailed information about a file within a metafile
'''
def __init__(self, file_path, size):
'''
Constructs a new MetafileFile object
'''
self.file_path = file_path
self.size = size
def __unicode__(self):
return self.getPath()
def __str__(self):
return self.__unicode__().encode("utf-8")
def getPath(self):
return self.file_path
def getSize(self):
return self.size
class MetafileFileWithOffset:
'''
Holds some additional information about a file as it relates to a piece
'''
def __init__(self, metafileFile, startOffset, readLength, entirelyInPiece):
self.metafileFile = metafileFile
self.startOffset = startOffset
self.readLength = readLength
self.entirelyInPiece = entirelyInPiece
def __str__(self):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
return unicode(self.metafileFile)
def __repr__(self):
return "MFWO|" + self.__str__()
def getMetafileFile(self):
return self.metafileFile
def getStartOffset(self):
return self.startOffset
def getReadLength(self):
return self.readLength
def fileEntirelyInPiece(self):
return self.entirelyInPiece
|
apache-2.0
| -5,599,650,462,333,740,000
| 32.606061
| 151
| 0.598171
| false
| 4.262493
| false
| false
| false
|
onepercentclub/onepercentclub-site
|
apps/projects/management/commands/cron_status_realised.py
|
1
|
3907
|
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now
from django.utils.translation import ugettext as _
class Command(BaseCommand):
args = 'No arguments required'
help = 'Sets projects to "Done Incomplete" and task status to "Realised" when the deadline is passed'
def handle(self, *args, **options):
from apps.projects.models import Project
from bluebottle.bb_projects.models import ProjectPhase
from apps.tasks.models import Task
"""
Projects which have expired but have been funded will already have their status
        set to done-complete, so these can be ignored. We only need to update projects which
        haven't been funded but have expired, or which have been overfunded and have expired.
"""
try:
done_incomplete_phase = ProjectPhase.objects.get(slug='done-incomplete')
self.stdout.write("Found ProjectPhase model with name 'Done Incomplete'")
except ProjectPhase.DoesNotExist:
raise CommandError("A ProjectPhase with name 'Done Incomplete' does not exist")
try:
done_complete_phase = ProjectPhase.objects.get(slug='done-complete')
self.stdout.write("Found ProjectPhase model with name 'Done Complete'")
except ProjectPhase.DoesNotExist:
raise CommandError("A ProjectPhase with name 'Done Complete' does not exist")
try:
campaign_phase = ProjectPhase.objects.get(slug='campaign')
self.stdout.write("Found ProjectPhase model with name 'Campaign'")
except ProjectPhase.DoesNotExist:
raise CommandError("A ProjectPhase with name 'Campaign' does not exist")
"""
Projects which have at least the funds asked, are still in campaign phase and have not expired
need the campaign funded date set to now.
FIXME: this action should be moved into the code where 'amount_needed' is calculated => when
the value is lte 0 then set campaign_funded.
"""
self.stdout.write("Checking Project funded and still running...")
Project.objects.filter(amount_needed__lte=0, status=campaign_phase, deadline__gt=now()).update(campaign_funded=now())
"""
Projects which have at least the funds asked, are still in campaign phase but have expired
need to be set to 'done complete' and the campaign ended date set to now.
Iterate over projects and save them one by one so the receivers get a signal
"""
self.stdout.write("Checking Project overfunded deadlines...")
for project in Project.objects.filter(amount_needed__lt=0, status=campaign_phase, deadline__lte=now()).all():
project.status = done_complete_phase
project.campaign_ended = now()
project.save()
"""
Projects which don't have the funds asked, are still in campaign phase but have expired
need to be set to 'done incomplete' and the campaign ended date set to now.
Iterate over projects and save them one by one so the receivers get a signal
"""
self.stdout.write("Checking Project unfunded deadlines...")
for project in Project.objects.filter(status=campaign_phase, deadline__lt=now()).all():
project.status = done_incomplete_phase
project.campaign_ended = now()
project.save()
"""
Iterate over tasks and save them one by one so the receivers get a signal
"""
self.stdout.write("Checking Task deadlines...\n\n")
for task in Task.objects.filter(status='in progress', deadline__lt=now()).all():
task.status = 'realized'
task.save()
self.stdout.write("Successfully updated the status of expired Project and Task models.\n\n")
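    # Illustrative scheduling sketch (paths and interval are assumptions, not
    # taken from the project itself): run the command periodically via cron,
    # e.g.
    #
    #   */10 * * * * /path/to/env/bin/python /path/to/manage.py cron_status_realised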
|
bsd-3-clause
| 7,871,289,710,018,549,000
| 49.102564
| 125
| 0.666752
| false
| 4.634638
| false
| false
| false
|
Blazemeter/taurus
|
bzt/jmx/base.py
|
1
|
55715
|
"""
Module holds base stuff regarding JMX format
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import traceback
from cssselect import GenericTranslator
from lxml import etree
from urllib import parse
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.utils import BetterDict, iteritems, numeric_types
from bzt.requests_model import has_variable_pattern
LOG = logging.getLogger("")
def try_convert(val, func=int, default=None):
if val is None:
res = val
elif has_variable_pattern(val): # it's property...
if default is not None:
val = get_prop_default(val) or default
res = func(val)
else:
res = val
else:
res = func(val)
return res
def get_prop_default(val):
comma_ind = val.find(",")
comma_found = comma_ind > -1
is_property = val.startswith("${__property(") or val.startswith("${__P(")
if has_variable_pattern(val) and is_property and comma_found:
return val[comma_ind + 1: -2]
else:
return None
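# Illustrative behaviour sketch for the two helpers above (values hypothetical):
#
#   try_convert("3", int)                   # -> 3
#   try_convert("${__P(users,5)}", int)     # -> "${__P(users,5)}" (left as-is)
#   try_convert("${__P(users,5)}", int, 1)  # -> 5, the default baked into the
#                                           #    property expression
#   try_convert("${__P(users)}", int, 1)    # -> 1, the fallback default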
def cond_int(val):
if isinstance(val, float):
return int(val)
return val
def cond_float(val, rounding=None):
if isinstance(val, numeric_types):
return round(float(val), rounding) if rounding is not None else float(val)
return val
class JMX(object):
"""
A class to manipulate and generate JMX test plans for JMeter
    :param original: path to an existing JMX file to load. If None, an empty
    test plan is created.
"""
TEST_PLAN_SEL = "jmeterTestPlan>hashTree>hashTree"
THR_GROUP_SEL = TEST_PLAN_SEL + ">hashTree[type=tg]"
THR_TIMER = "kg.apc.jmeter.timers.VariableThroughputTimer"
SET_VAR_ACTION = "kg.apc.jmeter.control.sampler.SetVariablesAction"
def __init__(self, original=None, test_plan_name="BZT Generated Test Plan"):
self.log = logging.getLogger(self.__class__.__name__)
if original:
self.load(original)
else:
root = etree.Element("jmeterTestPlan")
self.tree = etree.ElementTree(root)
test_plan = etree.Element("TestPlan", guiclass="TestPlanGui",
testname=test_plan_name,
testclass="TestPlan", enabled="true")
htree = etree.Element("hashTree")
htree.append(test_plan)
htree.append(etree.Element("hashTree"))
self.append("jmeterTestPlan", htree)
element_prop = self._get_arguments_panel("TestPlan.user_defined_variables")
self.append("jmeterTestPlan>hashTree>TestPlan", element_prop)
def load(self, original):
"""
Load existing JMX file
:param original: JMX file path
:raise TaurusInternalException: in case of XML parsing error
"""
try:
self.tree = etree.ElementTree()
self.tree.parse(original)
except BaseException as exc:
msg = "XML parsing failed for file %s: %s"
raise TaurusInternalException(msg % (original, exc))
def get(self, selector):
"""
Returns tree elements by CSS selector
:type selector: str
:return:
"""
expression = GenericTranslator().css_to_xpath(selector)
nodes = self.tree.xpath(expression)
return nodes
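    # Illustrative usage sketch (the file name is hypothetical):
    #
    #   jmx = JMX("existing.jmx")
    #   thread_groups = jmx.get(JMX.THR_GROUP_SEL)
    #
    # Selectors are CSS expressions translated to XPath through cssselect's
    # GenericTranslator, as shown above.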
def append(self, selector, node):
"""
Add node to container specified by selector. If multiple nodes will
match the selector, first of them will be used as container.
:param selector: CSS selector for container
:param node: Element instance to add
:raise TaurusInternalException: if container was not found
"""
container = self.get(selector)
if not len(container):
            msg = "Failed to find container node by selector: %s"
raise TaurusInternalException(msg % selector)
container[0].append(node)
def save(self, filename):
"""
Save JMX into file
:param filename:
"""
self.log.debug("Saving JMX to: %s", filename)
with open(filename, "wb") as fhd:
self.tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
@staticmethod
def _flag(flag_name, bool_value):
"""
Generates element for JMX flag node
:param flag_name:
:param bool_value:
:return:
"""
elm = etree.Element(flag_name)
elm.text = "true" if bool_value else "false"
return elm
@staticmethod
def __jtl_writer(filename, label, flags):
"""
Generates JTL writer
:param filename:
:return:
"""
jtl = etree.Element("stringProp", {"name": "filename"})
jtl.text = filename
name = etree.Element("name")
name.text = "saveConfig"
value = etree.Element("value")
value.set("class", "SampleSaveConfiguration")
for key, val in iteritems(flags):
value.append(JMX._flag(key, val))
obj_prop = etree.Element("objProp")
obj_prop.append(name)
obj_prop.append(value)
listener = etree.Element("ResultCollector",
testname=label,
testclass="ResultCollector",
guiclass="SimpleDataWriter")
listener.append(jtl)
listener.append(obj_prop)
return listener
@staticmethod
def new_kpi_listener(filename, flag_overrides=None):
"""
Generates listener for writing basic KPI data in CSV format
:param filename:
:return:
"""
defaults = {
"xml": False,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"connectTime": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": False,
"encoding": False,
"assertions": False,
"subresults": False,
"responseData": False,
"samplerData": False,
"responseHeaders": False,
"requestHeaders": False,
"responseDataOnError": False,
"saveAssertionResultsFailureMessage": False,
"bytes": True,
"hostname": True,
"threadCounts": True,
"url": False
}
flags = BetterDict.from_dict(defaults)
if flag_overrides:
flags.merge(flag_overrides)
return JMX.__jtl_writer(filename, "KPI Writer", flags)
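    # Example call (hypothetical file name): JMX.new_kpi_listener("kpi.jtl", {"url": True})
    # keeps the CSV defaults above and additionally records the request URL.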
@staticmethod
def new_xml_listener(filename, is_full, user_flags):
"""
:param is_full: bool
:param filename: str
:param user_flags: BetterDict
:return:
"""
default_flags = {
"xml": True,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": True,
"encoding": True,
"assertions": True,
"subresults": True,
"responseData": False,
"samplerData": False,
"responseHeaders": True,
"requestHeaders": True,
"responseDataOnError": True,
"saveAssertionResultsFailureMessage": True,
"bytes": True,
"threadCounts": True,
"url": True
}
flags = BetterDict.from_dict(default_flags)
flags.merge(user_flags)
if is_full:
writer = JMX.__jtl_writer(filename, "Trace Writer", flags)
else:
writer = JMX.__jtl_writer(filename, "Errors Writer", flags)
writer.append(JMX._bool_prop("ResultCollector.error_logging", True))
return writer
@staticmethod
def _get_arguments_panel(name):
"""
Generates ArgumentsPanel node
:param name:
:return:
"""
return etree.Element("elementProp", name=name, elementType="Arguments",
guiclass="ArgumentsPanel", testclass="Arguments")
@staticmethod
def get_auth_manager(authorizations, clear_flag):
mgr = etree.Element("AuthManager", guiclass="AuthPanel", testclass="AuthManager",
testname="HTTP Authorization Manager")
if clear_flag:
mgr.append(JMX._bool_prop("AuthManager.clearEachIteration", True))
auth_coll = JMX._collection_prop("AuthManager.auth_list")
mgr.append(auth_coll)
for authorization in authorizations:
auth_element = JMX._element_prop(name="", element_type="Authorization")
conf_url = authorization.get("url", "")
conf_name = authorization.get("name", "")
conf_pass = authorization.get("password", "")
conf_domain = authorization.get("domain", "")
conf_realm = authorization.get("realm", "")
conf_mech = authorization.get("mechanism", "").upper()
if not (conf_name and conf_pass and (conf_url or conf_domain)):
LOG.warning("Wrong authorization: %s" % authorization)
continue
auth_element.append(JMX._string_prop("Authorization.url", conf_url))
auth_element.append(JMX._string_prop("Authorization.username", conf_name))
auth_element.append(JMX._string_prop("Authorization.password", conf_pass))
auth_element.append(JMX._string_prop("Authorization.domain", conf_domain))
auth_element.append(JMX._string_prop("Authorization.realm", conf_realm))
if conf_mech == "KERBEROS": # optional prop
auth_element.append(JMX._string_prop("Authorization.mechanism", "KERBEROS"))
auth_coll.append(auth_element)
return mgr
@staticmethod
def _get_http_request(url, label, method, timeout, body, keepalive, files=(), encoding=None, follow_redirects=True,
use_random_host_ip=False, host_ips=()):
"""
Generates HTTP request
:type method: str
:type label: str
:type url: str
:rtype: lxml.etree.Element
"""
proxy = etree.Element("HTTPSamplerProxy", guiclass="HttpTestSampleGui", testclass="HTTPSamplerProxy")
proxy.set("testname", label)
args = JMX._get_arguments_panel("HTTPsampler.Arguments")
if isinstance(body, str):
JMX.__add_body_from_string(args, body, proxy)
elif isinstance(body, dict):
JMX.__add_body_from_script(args, body, proxy)
elif body:
msg = "Cannot handle 'body' option of type %s: %s"
raise TaurusInternalException(msg % (type(body), body))
parsed_url = parse.urlparse(url)
JMX.__add_hostnameport_2sampler(parsed_url, proxy, url)
path = parsed_url.path
if parsed_url.params:
path += ";" + parsed_url.params
if parsed_url.query:
path += "?" + parsed_url.query
proxy.append(JMX._string_prop("HTTPSampler.path", path))
proxy.append(JMX._string_prop("HTTPSampler.method", method))
proxy.append(JMX._bool_prop("HTTPSampler.use_keepalive", keepalive))
proxy.append(JMX._bool_prop("HTTPSampler.follow_redirects", follow_redirects))
proxy.append(JMX._bool_prop("HTTPSampler.auto_redirects", False))
if timeout is not None:
proxy.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
proxy.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if encoding is not None:
proxy.append(JMX._string_prop("HTTPSampler.contentEncoding", encoding))
proxy.extend(JMX.get_files_elements(files))
if use_random_host_ip and host_ips:
if len(host_ips) > 1:
expr = "${__chooseRandom(%s,randomAddr)}" % ",".join(host_ips)
else:
expr = host_ips[0]
proxy.append(JMX._string_prop("HTTPSampler.ipSource", expr))
return proxy
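    # Example with hypothetical values:
    #   JMX._get_http_request("http://blazedemo.com/?tool=BlazeMeter", "home", "GET",
    #                         "30000", None, True)
    # builds a keep-alive GET sampler: the URL is split into protocol/domain/port
    # properties, the path becomes "/?tool=BlazeMeter", and no body arguments are added.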
@staticmethod
def get_files_elements(files):
elements = []
if files:
files_prop = JMX._element_prop("HTTPsampler.Files", "HTTPFileArgs")
elements.append(files_prop)
files_coll = JMX._collection_prop("HTTPFileArgs.files")
for file_dict in files:
file_elem = JMX._element_prop(file_dict.get("path", ""), "HTTPFileArg")
file_elem.append(JMX._string_prop("File.path", file_dict.get("path", "")))
file_elem.append(JMX._string_prop("File.paramname", file_dict.get("param", "")))
file_elem.append(JMX._string_prop("File.mimetype", file_dict.get("mime-type", "")))
files_coll.append(file_elem)
files_prop.append(files_coll)
return elements
@staticmethod
def get_keystore_config_elements(variable_name, start_index, end_index, preload):
elements = []
if variable_name:
elements = etree.Element("KeystoreConfig", guiclass="TestBeanGUI", testclass="KeystoreConfig",
testname="Taurus-Keystore-Configuration")
elements.append(JMX._string_prop("clientCertAliasVarName", variable_name))
elements.append(JMX._string_prop("startIndex", start_index))
elements.append(JMX._string_prop("endIndex", end_index))
elements.append(JMX._string_prop("preload", preload))
return elements
@staticmethod
def __add_body_from_string(args, body, proxy):
proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True))
coll_prop = JMX._collection_prop("Arguments.arguments")
header = JMX._element_prop("elementProp", "HTTPArgument")
try:
header.append(JMX._string_prop("Argument.value", body))
except ValueError:
LOG.warning("Failed to set body: %s", traceback.format_exc())
header.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
coll_prop.append(header)
args.append(coll_prop)
proxy.append(args)
@staticmethod
def __add_body_from_script(args, body, proxy):
http_args_coll_prop = JMX._collection_prop("Arguments.arguments")
for arg_name, arg_value in body.items():
try:
http_element_prop = JMX._element_prop(arg_name, "HTTPArgument")
except ValueError:
LOG.warning("Failed to get element property: %s", traceback.format_exc())
http_element_prop = JMX._element_prop('BINARY-STUB', "HTTPArgument")
try:
http_element_prop.append(JMX._string_prop("Argument.name", arg_name))
except ValueError:
LOG.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.name", "BINARY-STUB"))
try:
http_element_prop.append(
JMX._string_prop("Argument.value", arg_value if arg_value is not None else ''))
except ValueError:
LOG.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", True))
use_equals = arg_value is not None
http_element_prop.append(JMX._bool_prop("HTTPArgument.use_equals", arg_value is not None))
http_element_prop.append(JMX._string_prop("Argument.metadata", '=' if use_equals else ''))
http_args_coll_prop.append(http_element_prop)
args.append(http_args_coll_prop)
proxy.append(args)
@staticmethod
def __add_hostnameport_2sampler(parsed_url, proxy, url):
if parsed_url.scheme:
proxy.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc_parts = parsed_url.netloc.split(':')
if netloc_parts[0]:
proxy.append(JMX._string_prop("HTTPSampler.domain", netloc_parts[0]))
if len(netloc_parts) > 1 and netloc_parts[1]:
proxy.append(JMX._string_prop("HTTPSampler.port", netloc_parts[1]))
else:
try:
if parsed_url.port:
proxy.append(JMX._string_prop("HTTPSampler.port", parsed_url.port))
else:
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
except ValueError:
LOG.debug("Non-parsable port: %s", url)
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
@staticmethod
def _element_prop(name, element_type):
"""
Generates element property node
:param name:
:param element_type:
:return:
"""
res = etree.Element("elementProp", name=name, elementType=element_type)
return res
@staticmethod
def _collection_prop(name):
"""
Adds Collection prop
:param name:
:return:
"""
res = etree.Element("collectionProp", name=name)
return res
@staticmethod
def _string_prop(name, value):
"""
Generates string property node
:param name:
:param value:
:return:
"""
res = etree.Element("stringProp", name=name)
res.text = str(value)
return res
@staticmethod
def _long_prop(name, value):
"""
Generates long property node
:param name:
:param value:
:return:
"""
res = etree.Element("longProp", name=name)
res.text = str(value)
return res
@staticmethod
def _bool_prop(name, value):
"""
Generates boolean property
:param name:
:param value:
:return:
"""
res = etree.Element("boolProp", name=name)
res.text = 'true' if value else 'false'
return res
@staticmethod
def int_prop(name, value):
"""
JMX int property
:param name:
:param value:
:return:
"""
res = etree.Element("intProp", name=name)
res.text = str(value)
return res
@staticmethod
def get_thread_group(concurrency=None, rampup=0, hold=0, iterations=None,
testname="ThreadGroup", on_error="continue", thread_delay=False, scheduler_delay=None):
"""
Generates ThreadGroup
Expected values (by JMeter):
ThreadGroup.num_threads (concurrency): int
ThreadGroup.ramp_time (rampup): int
ThreadGroup.scheduler (need to hold): boolean
ThreadGroup.duration (rampup + hold): int
LoopController.loops (iterations): int
ThreadGroup.delayedStart: boolean
:return: etree element, ThreadGroup
"""
rampup = cond_int(rampup or 0)
hold = cond_int(hold or 0)
if concurrency is None:
concurrency = 1
if isinstance(concurrency, numeric_types) and concurrency <= 0:
enabled = "false"
else:
enabled = "true"
if not hold:
duration = rampup
elif not rampup:
duration = hold
elif isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
duration = hold + rampup
else:
duration = "${__intSum(%s,%s)}" % (rampup, hold)
trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
testclass="ThreadGroup", testname=testname, enabled=enabled)
if not iterations:
if duration:
iterations = -1
else:
iterations = 1
scheduler = False
if hold or (rampup and (iterations == -1)):
scheduler = True
if on_error is not None:
trg.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
loop = etree.Element("elementProp",
name="ThreadGroup.main_controller",
elementType="LoopController",
guiclass="LoopControlPanel",
testclass="LoopController")
# 'true' causes endless execution of TG in non-gui mode
loop.append(JMX._bool_prop("LoopController.continue_forever", False))
loop.append(JMX._string_prop("LoopController.loops", iterations))
trg.append(loop)
trg.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
trg.append(JMX._string_prop("ThreadGroup.ramp_time", rampup))
trg.append(JMX._string_prop("ThreadGroup.start_time", ""))
trg.append(JMX._string_prop("ThreadGroup.end_time", ""))
trg.append(JMX._bool_prop("ThreadGroup.scheduler", scheduler))
trg.append(JMX._string_prop("ThreadGroup.duration", duration))
if scheduler_delay:
trg.append(JMX._string_prop("ThreadGroup.delay", scheduler_delay))
if thread_delay:
trg.append(JMX._bool_prop("ThreadGroup.delayedStart", thread_delay))
return trg
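    # Example (hypothetical): JMX.get_thread_group(concurrency=10, rampup=60, hold=240)
    # yields a ThreadGroup with num_threads=10, ramp_time=60, duration=300,
    # scheduler=true and loops=-1 (threads run until the scheduler stops them).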
def get_rps_shaper(self):
"""
:return: etree.Element
"""
throughput_timer_element = etree.Element(self.THR_TIMER,
guiclass=self.THR_TIMER + "Gui",
testclass=self.THR_TIMER,
testname="Throughput_Limiter",
enabled="true")
shaper_load_prof = self._collection_prop("load_profile")
throughput_timer_element.append(shaper_load_prof)
return throughput_timer_element
def add_rps_shaper_schedule(self, shaper_etree, start_rps, end_rps, duration):
"""
Adds schedule to rps shaper
Expected values (by JMeter):
<first> ('start_rps'): float
<second> ('end_rps'): float
<third> ('duration'): int
"""
shaper_collection = shaper_etree.find(".//collectionProp[@name='load_profile']")
coll_prop = self._collection_prop("")
start_rps_prop = self._string_prop("", cond_float(start_rps, 3))
end_rps_prop = self._string_prop("", cond_float(end_rps, 3))
duration_prop = self._string_prop("", cond_int(duration))
coll_prop.append(start_rps_prop)
coll_prop.append(end_rps_prop)
coll_prop.append(duration_prop)
shaper_collection.append(coll_prop)
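    # Sketch of intended use (variable names are illustrative):
    #   shaper = jmx.get_rps_shaper()
    #   jmx.add_rps_shaper_schedule(shaper, 1, 10, 60)  # ramp 1 -> 10 RPS over 60s
    # Each call appends a (start_rps, end_rps, duration) triple to "load_profile".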
@staticmethod
def get_set_var_action(udv_dict, testname="Variables from Taurus"):
"""
:type testname: str
:type udv_dict: dict[str,str]
:rtype: etree.Element
"""
udv_element = etree.Element(JMX.SET_VAR_ACTION, guiclass=JMX.SET_VAR_ACTION + "Gui",
testclass=JMX.SET_VAR_ACTION, testname=testname)
arg_element = etree.Element("elementProp", name="SetVariablesAction", guiclass="ArgumentsPanel",
testclass="Arguments", testname="User Defined Variables", elementType="Arguments")
udv_element.append(arg_element)
udv_collection_prop = JMX._collection_prop("Arguments.arguments")
arg_element.append(udv_collection_prop)
for var_name in sorted(udv_dict.keys(), key=str):
udv_element_prop = JMX._element_prop(name=str(var_name), element_type="Argument")
udv_collection_prop.append(udv_element_prop)
udv_arg_name_prop = JMX._string_prop("Argument.name", var_name)
udv_arg_value_prop = JMX._string_prop("Argument.value", udv_dict[var_name])
udv_arg_meta_prop = JMX._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_meta_prop)
return udv_element
@staticmethod
def add_user_def_vars_elements(udv_dict, testname="Variables from Taurus"):
"""
:type testname: str
:type udv_dict: dict[str,str]
:rtype: etree.Element
"""
udv_element = etree.Element("Arguments", guiclass="ArgumentsPanel", testclass="Arguments",
testname=testname)
udv_collection_prop = JMX._collection_prop("Arguments.arguments")
for var_name in sorted(udv_dict.keys(), key=str):
udv_element_prop = JMX._element_prop(str(var_name), "Argument")
udv_arg_name_prop = JMX._string_prop("Argument.name", var_name)
udv_arg_value_prop = JMX._string_prop("Argument.value", udv_dict[var_name])
udv_arg_desc_prop = JMX._string_prop("Argument.desc", "")
udv_arg_meta_prop = JMX._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_desc_prop)
udv_element_prop.append(udv_arg_meta_prop)
udv_collection_prop.append(udv_element_prop)
udv_element.append(udv_collection_prop)
return udv_element
@staticmethod
def get_concurrency_thread_group(concurrency=None, rampup=0, hold=0, steps=None, on_error="continue",
testname="ConcurrencyThreadGroup", iterations=""):
"""
Generates ConcurrencyThreadGroup
Expected values (by JMeter):
Targetlevel (concurrency): int
RampUp (rampup): float
Steps (steps): boolean
Hold (hold): float
:return: etree element, Concurrency Thread Group
"""
if not rampup:
rampup = 0
if concurrency is None:
concurrency = 1
if isinstance(concurrency, numeric_types) and concurrency <= 0:
enabled = "false"
else:
enabled = "true"
        if steps is None:  # zero means unlimited steps (no stepping)
steps = 0
name = 'com.blazemeter.jmeter.threads.concurrency.ConcurrencyThreadGroup'
concurrency_thread_group = etree.Element(
name, guiclass=name + "Gui", testclass=name, testname=testname, enabled=enabled)
virtual_user_controller = etree.Element(
"elementProp",
name="ThreadGroup.main_controller",
elementType="com.blazemeter.jmeter.control.VirtualUserController")
concurrency_thread_group.append(virtual_user_controller)
concurrency_thread_group.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
concurrency_thread_group.append(JMX._string_prop("TargetLevel", str(concurrency)))
concurrency_thread_group.append(JMX._string_prop("RampUp", str(cond_int(rampup))))
concurrency_thread_group.append(JMX._string_prop("Steps", steps))
concurrency_thread_group.append(JMX._string_prop("Hold", str(cond_int(hold))))
concurrency_thread_group.append(JMX._string_prop("LogFilename", ""))
concurrency_thread_group.append(JMX._string_prop("Iterations", iterations or ""))
concurrency_thread_group.append(JMX._string_prop("Unit", "S"))
return concurrency_thread_group
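    # Example (hypothetical): JMX.get_concurrency_thread_group(50, rampup=120, hold=600, steps=5)
    # reaches 50 concurrent users in 5 steps over 120s and then holds for 600s;
    # all durations are written in seconds (Unit="S").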
@staticmethod
def get_dns_cache_mgr():
"""
Adds dns cache element with defaults parameters
:return:
"""
dns_element = etree.Element("DNSCacheManager", guiclass="DNSCachePanel", testclass="DNSCacheManager",
testname="DNS Cache Manager")
dns_element.append(JMX._collection_prop("DNSCacheManager.servers"))
dns_element.append(JMX._bool_prop("DNSCacheManager.clearEachIteration", False))
dns_element.append(JMX._bool_prop("DNSCacheManager.isCustomResolver", False))
return dns_element
@staticmethod
def _get_header_mgr(hdict):
"""
:type hdict: dict[str,str]
:rtype: lxml.etree.Element
"""
mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
coll_prop = etree.Element("collectionProp", name="HeaderManager.headers")
for hname, hval in iteritems(hdict):
header = etree.Element("elementProp", name="", elementType="Header")
header.append(JMX._string_prop("Header.name", hname))
header.append(JMX._string_prop("Header.value", hval))
coll_prop.append(header)
mgr.append(coll_prop)
return mgr
@staticmethod
def _get_cache_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CacheManager", guiclass="CacheManagerGui", testclass="CacheManager", testname="Cache")
mgr.append(JMX._bool_prop("clearEachIteration", True))
mgr.append(JMX._bool_prop("useExpires", True))
return mgr
@staticmethod
def _get_cookie_mgr(scenario=None):
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CookieManager", guiclass="CookiePanel", testclass="CookieManager", testname="Cookies")
mgr.append(JMX._bool_prop("CookieManager.clearEachIteration", False))
mgr.append(JMX._string_prop("CookieManager.implementation",
"org.apache.jmeter.protocol.http.control.HC4CookieHandler"))
if scenario:
cookies = scenario.get(Scenario.COOKIES)
if cookies:
cookies_coll = JMX._collection_prop("CookieManager.cookies")
mgr.append(cookies_coll)
for cookie in cookies:
if not isinstance(cookie, dict):
raise TaurusConfigError("Cookie must be dictionary: %s" % cookie)
c_name = cookie.get("name", TaurusConfigError("Name of cookie isn't found: %s" % cookie))
c_value = cookie.get("value", TaurusConfigError("Value of cookie isn't found: %s" % cookie))
c_domain = cookie.get("domain", TaurusConfigError("Domain of cookie isn't found: %s" % cookie))
c_path = cookie.get("path", "")
c_secure = cookie.get("secure", False)
# follow params are hardcoded in JMeter
c_expires = 0
c_path_specified = True
c_domain_specified = True
c_elem = etree.Element("elementProp", name=c_name, elementType="Cookie", testname=c_name)
c_elem.append(JMX._string_prop("Cookie.value", c_value))
c_elem.append(JMX._string_prop("Cookie.domain", c_domain))
c_elem.append(JMX._string_prop("Cookie.path", c_path))
c_elem.append(JMX._bool_prop("Cookie.secure", c_secure))
c_elem.append(JMX._long_prop("Cookie.expires", c_expires))
c_elem.append(JMX._bool_prop("Cookie.path_specified", c_path_specified))
c_elem.append(JMX._bool_prop("Cookie.domain_specified", c_domain_specified))
cookies_coll.append(c_elem)
return mgr
@staticmethod
def _get_http_defaults(default_address=None, timeout=None, retrieve_resources=None, concurrent_pool_size=4,
content_encoding=None, resources_regex=None):
"""
:rtype: lxml.etree.Element
"""
cfg = etree.Element("ConfigTestElement", guiclass="HttpDefaultsGui",
testclass="ConfigTestElement", testname="Defaults")
if retrieve_resources:
cfg.append(JMX._bool_prop("HTTPSampler.image_parser", True))
cfg.append(JMX._bool_prop("HTTPSampler.concurrentDwn", True))
if concurrent_pool_size:
cfg.append(JMX._string_prop("HTTPSampler.concurrentPool", concurrent_pool_size))
params = etree.Element("elementProp",
name="HTTPsampler.Arguments",
elementType="Arguments",
guiclass="HTTPArgumentsPanel",
testclass="Arguments", testname="user_defined")
cfg.append(params)
if default_address:
parsed_url = parse.urlsplit(default_address)
if parsed_url.scheme:
cfg.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc = parsed_url.netloc
if ':' in netloc:
index = netloc.rfind(':')
cfg.append(JMX._string_prop("HTTPSampler.port", netloc[index + 1:]))
netloc = netloc[:index]
cfg.append(JMX._string_prop("HTTPSampler.domain", netloc))
if timeout:
cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
cfg.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if content_encoding:
cfg.append(JMX._string_prop("HTTPSampler.contentEncoding", content_encoding))
if resources_regex:
cfg.append(JMX._string_prop("HTTPSampler.embedded_url_re", resources_regex))
return cfg
@staticmethod
def _get_dur_assertion(timeout):
"""
:type timeout: int
:return:
"""
element = etree.Element("DurationAssertion", guiclass="DurationAssertionGui",
testclass="DurationAssertion", testname="Timeout Check")
element.append(JMX._string_prop("DurationAssertion.duration", timeout))
return element
@staticmethod
def get_constant_timer(delay):
timer_type = "ConstantTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("%s.delay" % timer_type, delay))
return [element, etree.Element("hashTree")]
@staticmethod
def get_uniform_timer(maximum, offset):
timer_type = "UniformRandomTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", offset))
element.append(JMX._string_prop("RandomTimer.range", maximum))
return [element, etree.Element("hashTree")]
@staticmethod
def get_gaussian_timer(dev, offset):
timer_type = "GaussianRandomTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", offset))
element.append(JMX._string_prop("RandomTimer.range", dev))
return [element, etree.Element("hashTree")]
@staticmethod
def get_poisson_timer(lam, delay):
timer_type = "PoissonRandomTimer"
element = etree.Element(timer_type, guiclass="%sGui" % timer_type, testclass=timer_type, testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", delay))
element.append(JMX._string_prop("RandomTimer.range", lam))
return [element, etree.Element("hashTree")]
@staticmethod
def _get_extractor(varname, headers, regexp, template, match_no, default='NOT_FOUND', scope='', from_var=''):
"""
:type varname: str
:type regexp: str
:type template: str|int
:type match_no: int
:type default: str
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
if isinstance(template, int):
template = '$%s$' % template
if headers.lower() == 'headers':
headers = 'true'
elif headers.lower() == 'http-code':
headers = 'code'
elif headers.lower() == 'url':
headers = 'URL'
else:
headers = 'body'
element = etree.Element("RegexExtractor", guiclass="RegexExtractorGui",
testclass="RegexExtractor", testname="Get %s" % varname, enabled="true")
element.append(JMX._string_prop("RegexExtractor.useHeaders", headers))
element.append(JMX._string_prop("RegexExtractor.refname", varname))
element.append(JMX._string_prop("RegexExtractor.regex", regexp))
element.append(JMX._string_prop("RegexExtractor.template", template))
element.append(JMX._string_prop("RegexExtractor.default", default))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
element.extend(JMX.get_scope_props(scope, from_var))
return element
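    # Example (hypothetical): JMX._get_extractor("session", "body", r"sid=(\w+)", 1, 1)
    # stores group 1 of the first body match in ${session}, defaulting to NOT_FOUND.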
@staticmethod
def _get_boundary_extractor(varname, subject, left, right, match_no, defvalue='NOT_FOUND', scope='', from_var=''):
"""
:type varname: str
        :type subject: str
        :type left: str
        :type right: str
        :type match_no: int
        :type defvalue: str
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
subjects = {
'body': 'false',
'body-unescaped': 'unescaped',
'body-as-document': 'as_document',
'response-headers': 'true',
'request-headers': 'request_headers',
'url': 'URL',
'code': 'code',
'message': 'message',
}
subject = subjects.get(subject)
element = etree.Element("BoundaryExtractor", guiclass="BoundaryExtractorGui",
testclass="BoundaryExtractor", testname="Get %s" % varname, enabled="true")
element.append(JMX._string_prop("BoundaryExtractor.useHeaders", subject))
element.append(JMX._string_prop("BoundaryExtractor.refname", varname))
element.append(JMX._string_prop("BoundaryExtractor.lboundary", left)) # TODO: html-escape boundaries?
element.append(JMX._string_prop("BoundaryExtractor.rboundary", right))
element.append(JMX._string_prop("RegexExtractor.default", defvalue))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_jquerycss_extractor(varname, selector, attribute, match_no, default="NOT_FOUND", scope='', from_var=''):
"""
:type varname: str
        :type selector: str
        :type attribute: str
:type match_no: int
:type default: str
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
element = etree.Element("HtmlExtractor", guiclass="HtmlExtractorGui", testclass="HtmlExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("HtmlExtractor.refname", varname))
element.append(JMX._string_prop("HtmlExtractor.expr", selector))
element.append(JMX._string_prop("HtmlExtractor.attribute", attribute))
element.append(JMX._string_prop("HtmlExtractor.match_number", match_no))
element.append(JMX._string_prop("HtmlExtractor.default", default))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_json_extractor(varname, jsonpath, default='NOT_FOUND', from_variable=None):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathextractor"
element = etree.Element("%s.JSONPathExtractor" % package,
guiclass="%s.gui.JSONPathExtractorGui" % package,
testclass="%s.JSONPathExtractor" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("VAR", varname))
element.append(JMX._string_prop("JSONPATH", jsonpath))
element.append(JMX._string_prop("DEFAULT", default))
if from_variable:
element.append(JMX._string_prop("VARIABLE", from_variable))
element.append(JMX._string_prop("SUBJECT", "VAR"))
return element
@staticmethod
def get_scope_props(scope, from_variable):
props = []
if scope:
props.append(JMX._string_prop("Sample.scope", scope))
if scope == "variable":
props.append(JMX._string_prop("Scope.variable", from_variable))
return props
@staticmethod
def _get_internal_json_extractor(varname, jsonpath, default, scope, from_variable, match_no, concat):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "JSONPostProcessor"
element = etree.Element(package,
guiclass="%sGui" % package,
testclass="%s" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("JSONPostProcessor.referenceNames", varname))
element.append(JMX._string_prop("JSONPostProcessor.jsonPathExprs", jsonpath))
element.append(JMX._string_prop("JSONPostProcessor.match_numbers", match_no))
if default:
element.append(JMX._string_prop("JSONPostProcessor.defaultValues", default))
element.extend(JMX.get_scope_props(scope, from_variable))
if concat:
element.append(JMX._bool_prop("JSONPostProcessor.compute_concat", True))
return element
@staticmethod
def _get_json_path_assertion(jsonpath, expected_value, json_validation, expect_null, invert, regexp=True):
"""
:type jsonpath: str
:type expected_value: str
:type json_validation: bool
:type expect_null: bool
:type invert: bool
:type regexp: bool
:return: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion"
element = etree.Element("%s.JSONPathAssertion" % package,
guiclass="%s.gui.JSONPathAssertionGui" % package,
testclass="%s.JSONPathAssertion" % package,
testname="JSon path assertion")
element.append(JMX._string_prop("JSON_PATH", jsonpath))
element.append(JMX._string_prop("EXPECTED_VALUE", expected_value))
element.append(JMX._bool_prop("JSONVALIDATION", json_validation))
element.append(JMX._bool_prop("EXPECT_NULL", expect_null))
element.append(JMX._bool_prop("INVERT", invert))
element.append(JMX._bool_prop("ISREGEX", regexp))
return element
@staticmethod
def _get_xpath_extractor(varname, xpath, default, validate_xml, ignore_whitespace, match_no, use_namespaces,
use_tolerant_parser, scope, from_var):
"""
:type varname: str
:type xpath: str
:type default: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:type scope: str
:type from_var: str
:rtype: lxml.etree.Element
"""
element = etree.Element("XPathExtractor",
guiclass="XPathExtractorGui",
testclass="XPathExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("XPathExtractor.refname", varname))
element.append(JMX._string_prop("XPathExtractor.xpathQuery", xpath))
element.append(JMX._string_prop("XPathExtractor.default", default))
element.append(JMX._bool_prop("XPathExtractor.validate", validate_xml))
element.append(JMX._bool_prop("XPathExtractor.whitespace", ignore_whitespace))
element.append(JMX._string_prop("XPathExtractor.matchNumber", match_no))
element.append(JMX._bool_prop("XPathExtractor.namespace", use_namespaces))
element.append(JMX._bool_prop("XPathExtractor.tolerant", use_tolerant_parser))
element.extend(JMX.get_scope_props(scope, from_var))
return element
@staticmethod
def _get_xpath_assertion(xpath, validate_xml, ignore_whitespace, use_tolerant_parser, invert):
"""
:type xpath: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:return: lxml.etree.Element
"""
element = etree.Element("XPathAssertion",
guiclass="XPathAssertionGui",
testclass="XPathAssertion",
testname="XPath Assertion")
element.append(JMX._string_prop("XPath.xpath", xpath))
element.append(JMX._bool_prop("XPath.validate", validate_xml))
element.append(JMX._bool_prop("XPath.whitespace", ignore_whitespace))
element.append(JMX._bool_prop("XPath.tolerant", use_tolerant_parser))
element.append(JMX._bool_prop("XPath.negate", invert))
return element
@staticmethod
def _get_resp_assertion(field, contains, is_regexp, is_invert, assume_success=False):
"""
:type field: str
:type contains: list[str]
:type is_regexp: bool
:type is_invert: bool
:rtype: lxml.etree.Element
"""
tname = "Assert %s %s" % ("hasn't" if is_invert else "has",
"[" + ", ".join('"' + str(x) + '"' for x in contains) + "]")
element = etree.Element("ResponseAssertion", guiclass="AssertionGui",
testclass="ResponseAssertion", testname=tname)
if field == Scenario.FIELD_HEADERS:
fld = "Assertion.response_headers"
elif field == Scenario.FIELD_RESP_CODE:
fld = "Assertion.response_code"
else:
fld = "Assertion.response_data"
if is_regexp:
if is_invert:
mtype = 6 # not contains
else:
mtype = 2 # contains
else:
if is_invert:
mtype = 20 # not substring
else:
mtype = 16 # substring
element.append(JMX._string_prop("Assertion.test_field", fld))
element.append(JMX._string_prop("Assertion.test_type", mtype))
element.append(JMX._bool_prop("Assertion.assume_success", assume_success))
coll_prop = etree.Element("collectionProp", name="Asserion.test_strings")
for string in contains:
coll_prop.append(JMX._string_prop("", string))
element.append(coll_prop)
return element
@staticmethod
def _get_jsr223_element(language, script_file, parameters, execute, script_text=None, cache_key='true'):
if execute == "before":
proc = "JSR223PreProcessor"
else:
proc = "JSR223PostProcessor"
element = etree.Element(proc, guiclass="TestBeanGUI", testclass=proc, testname=proc)
element.append(JMX._string_prop("filename", script_file if script_file else ''))
element.append(JMX._string_prop("script", script_text if script_text else ''))
element.append(JMX._string_prop("parameters", parameters))
element.append(JMX._string_prop("scriptLanguage", language))
element.append(JMX._string_prop("cacheKey", cache_key))
return element
@staticmethod
def _get_csv_config(path, delimiter, loop, variable_names, is_quoted):
"""
:type path: str
:type delimiter: str
:type is_quoted: bool
:type loop: bool
:type variable_names: string
:return:
"""
element = etree.Element("CSVDataSet", guiclass="TestBeanGUI",
testclass="CSVDataSet", testname="CSV %s" % os.path.basename(path))
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._bool_prop("quotedData", is_quoted))
element.append(JMX._bool_prop("recycle", loop))
element.append(JMX._bool_prop("stopThread", not loop))
element.append(JMX._string_prop("variableNames", variable_names))
return element
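    # Example (hypothetical): JMX._get_csv_config("users.csv", ",", True, "login,password", False)
    # produces a CSVDataSet that recycles the file on EOF instead of stopping threads.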
@staticmethod
def _get_csv_config_random(path, delimiter, loop, variable_names):
"""
:type path: str
:type delimiter: str
:type loop: bool
:type variable_names: string
:return:
"""
element = etree.Element("com.blazemeter.jmeter.RandomCSVDataSetConfig",
guiclass="com.blazemeter.jmeter.RandomCSVDataSetConfigGui",
testclass="com.blazemeter.jmeter.RandomCSVDataSetConfig",
testname="bzm - Random CSV Data Set Config")
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("fileEncoding", "UTF-8"))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._string_prop("variableNames", variable_names))
element.append(JMX._bool_prop("randomOrder", True))
element.append(JMX._bool_prop("ignoreFirstLine", False if variable_names else True))
element.append(JMX._bool_prop("rewindOnTheEndOfList", loop))
element.append(JMX._bool_prop("independentListPerThread", False))
return element
def set_enabled(self, sel, state):
"""
Toggle items by selector
:type sel: str
:type state: bool
"""
items = self.get(sel)
self.log.debug("Enable %s elements %s: %s", state, sel, items)
for item in items:
item.set("enabled", 'true' if state else 'false')
def set_text(self, sel, text):
"""
Set text value
:type sel: str
:type text: str
"""
items = self.get(sel)
res = 0
for item in items:
item.text = str(text)
res += 1
return res
@staticmethod
def _get_simple_controller(name):
return etree.Element("GenericController", guiclass="LogicControllerGui", testclass="GenericController",
testname=name)
def _add_results_tree(self):
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
self.append(self.TEST_PLAN_SEL, dbg_tree)
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
@staticmethod
def _get_results_tree():
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
return dbg_tree
@staticmethod
def _get_if_controller(condition):
controller = etree.Element("IfController", guiclass="IfControllerPanel", testclass="IfController",
testname="If Controller")
controller.append(JMX._string_prop("IfController.condition", condition))
return controller
@staticmethod
def _get_once_controller():
"""
Generates Once Only Controller
:return: etree element, OnceOnlyController
"""
controller = etree.Element("OnceOnlyController", guiclass="OnceOnlyControllerGui",
testclass="OnceOnlyController", testname="Once Only Controller")
return controller
@staticmethod
def _get_loop_controller(loops):
"""
Generates Loop Controller
Expected values(by JMeter):
LoopController.loops(iterations): int
LoopController.continue_forever: boolean
:return: etree element, LoopController
"""
if loops == 'forever':
iterations = -1
else:
iterations = loops
controller = etree.Element("LoopController", guiclass="LoopControlPanel", testclass="LoopController",
testname="Loop Controller")
# 'false' means controller can be called only one time (by parent)
controller.append(JMX._bool_prop("LoopController.continue_forever", True))
controller.append(JMX._string_prop("LoopController.loops", str(iterations)))
return controller
@staticmethod
def _get_foreach_controller(input_var, loop_var):
# TODO: useSeparator option
controller = etree.Element("ForeachController", guiclass="ForeachControlPanel", testclass="ForeachController",
testname="ForEach Controller")
controller.append(JMX._string_prop("ForeachController.inputVal", input_var))
controller.append(JMX._string_prop("ForeachController.returnVal", loop_var))
controller.append(JMX._bool_prop("ForeachController.useSeparator", True))
return controller
@staticmethod
def _get_while_controller(condition):
controller = etree.Element("WhileController", guiclass="WhileControllerGui", testclass="WhileController",
testname="While Controller")
controller.append(JMX._string_prop("WhileController.condition", condition))
return controller
@staticmethod
def _get_transaction_controller(transaction_name, force_parent_sample=False, include_timers=False):
controller = etree.Element("TransactionController", guiclass="TransactionControllerGui",
testclass="TransactionController", testname=transaction_name)
controller.append(JMX._bool_prop("TransactionController.parent", force_parent_sample))
controller.append(JMX._bool_prop("TransactionController.includeTimers", include_timers))
return controller
@staticmethod
def _get_functional_mode_prop(enabled):
return JMX._bool_prop("TestPlan.functional_mode", enabled)
@staticmethod
def _get_action_block(action_index, target_index, duration_ms):
action = etree.Element("TestAction", guiclass="TestActionGui", testclass="TestAction", testname="Test Action")
action.append(JMX.int_prop("ActionProcessor.action", action_index))
action.append(JMX.int_prop("ActionProcessor.target", target_index))
action.append(JMX._string_prop("ActionProcessor.duration", str(duration_ms)))
return action
|
apache-2.0
| -383,183,640,151,168,500
| 38.542229
| 119
| 0.590631
| false
| 4.130709
| true
| false
| false
|
titilambert/harbour-squilla
|
squilla/lib/friend.py
|
1
|
8123
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# friend.py
#
# This file is part of Squilla
#
# Copyright (C) 2014 Thibault Cohen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import select
import random
import socket
from time import sleep
import subprocess
from socket import inet_aton
from threading import Thread
import pyotherside
from mdns.zeroconf import ServiceInfo
import dbus
from squilla.lib.logger import logger
from squilla.lib.config import is_favorite, get_favorites
from squilla.lib import get_presence_auth_user, friend_list
from squilla.lib.presence_browser import zeroconf
from squilla.lib.utils import get_interface_address
from squilla.lib.config import get_interface_name
port = 5299
# NEED TO GET THE GOOD IP!!!!
ip_address = '192.168.13.15'
ip_address = get_interface_address(get_interface_name())
#ip_address = '0.0.0.0'
class Friend(Thread):
def __init__(self, fullname, number, auth_user, parent=None):
Thread.__init__(self)
global ip_address
self.fullname = fullname
self.number = number
self.port = port
        if ip_address is None:
ip_address = get_interface_address(get_interface_name())
self.ip_address = ip_address
self.auth_user = auth_user
self.node = fullname + "@jolla"
self.favorite = False
self.parent = parent
self.client = None # Bonjour socket client
self.is_ready = False
self.id = 0
def set_data(self, value, attr):
""" For view
"""
if attr == "favorite":
value, bool = value.toInt()
if bool:
if value == QtCore.Qt.Checked:
setattr(self, "favorite", QtCore.Qt.Checked)
else:
setattr(self, "favorite", QtCore.Qt.Unchecked)
return True
return False
def run(self):
# Register on bonjour chat
self.id = random.randint(0, 10000000)
# Prepare properties
txt = {}
txt['1st'] = str(self.fullname)
txt['1st'] = self.fullname.encode('utf-8')
#txt['1st'] = "aaa"
txt['last'] = ""
txt['status'] = 'avail'
txt['port.p2pj'] = 5299
txt['nick'] = self.fullname.encode('utf-8')
txt['node'] = self.node.encode('utf-8')
txt['jid'] = self.node.encode('utf-8')
txt['email'] = self.node.encode('utf-8')
txt['version'] = 1
txt['txtvers'] = 1
name = self.node + '._presence._tcp.local.'
reg_type = '_presence._tcp.local.'
# Prepare service informations
self.info = ServiceInfo(reg_type, name, inet_aton(self.ip_address), self.port, properties=txt)
# Register service
zeroconf.register_service(self.info)
self.is_ready = True
# Join thread
zeroconf.engine.join()
def unregister(self):
""" Unregister service """
zeroconf.unregister_service(self.info)
def send_sms(self, message):
logger.debug("Sending sms using 'dbus'")
bus = dbus.SystemBus()
smsobject = bus.get_object('org.ofono',
'/ril_0')
smsiface = dbus.Interface(smsobject, 'org.ofono.MessageManager')
message = message.encode('utf-8')
smsiface.SendMessage(self.number, message)
logger.debug("Sms send: %s" % message)
logger.debug("to: %s " % self.number)
def sms_to_bonjour(self, msg):
logger.debug("Forward sms to bonjour")
msg = msg.replace("<", "<")
msg = msg.replace(">", ">")
        # Wait until this contact is registered on bonjour
        while not self.is_ready:
            logger.debug("Waiting for bonjour contact "
                         "to be registered: %s" % self.fullname)
sleep(1)
# Connect to bonjour server
self.auth_user = get_presence_auth_user()
if self.auth_user is None:
logger.debug("Authentication user not set")
return False
#logger.debug(self.auth_user)
#logger.debug(self.auth_user.values())
#logger.debug(list(self.auth_user.values()))
host = self.auth_user['host']
port = self.auth_user['port']
so = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.debug("Connecting to %s:%s" % (host, port))
try:
so.connect((host, port))
        except (socket.error, TypeError) as e:
logger.debug("Connection error: %s" % str(e))
return False
# Dont need this !?
so.setblocking(1)
so.settimeout(2)
# Prepare variables
username = self.auth_user['name']
dic = {"to": username,
"from": self.node,
"msg": msg,
"id": self.id}
        # Handshake (open the XMPP stream)
# Send data
xml = (u"""<?xml version='1.0' encoding='UTF-8'?><stream:stream """
u"""xmlns='jabber:client' """
u"""xmlns:stream='http://etherx.jabber.org/streams' """
u"""to="%(to)s" from="%(from)s" version="1.0">""" % dic)
logger.debug(xml)
so.send(xml.encode('utf-8'))
# Read data
try:
data = so.recv(1024)
except socket.timeout:
logger.debug("socket.timeout1")
except Exception as e:
logger.debug(e)
# Send data
so.send("""<stream:features/>""".encode('utf-8'))
# Read data
try:
data = so.recv(1024)
except socket.timeout:
logger.debug("socket.timeout2")
# Send data
xml = ("""<message from="%(from)s" to="%(to)s" type="chat" """
"""id="%(id)s"><body>%(msg)s</body></message>""" % dic)
logger.debug(xml)
logger.debug("Send message")
so.send(xml.encode('utf-8'))
try:
data = so.recv(1024)
except socket.timeout:
logger.debug("socket.timeout3")
# Close connection
logger.debug("End foward sms to bonjour")
so.close()
return True
def delete_friend(number):
global friend_list
for friend in friend_list:
if friend.number == number:
logger.debug("Friend %s deleted" % friend.fullname)
index = friend_list.index(friend)
friend_list.remove(friend)
friend.unregister()
del(friend)
return index
return None
def add_friend(fullname, number):
global friend_list
number_list = [friend.number for friend in friend_list]
    if number not in number_list:
# Create a new friend
logger.debug("This is a new friend: %s" % number)
# Save it !
logger.debug("PRESENCE_AUTH: " + str(get_presence_auth_user()))
auth_user = get_presence_auth_user()
new_friend = Friend(fullname, number, auth_user)
# append to friend list
friend_list.append(new_friend)
# Register it on bonjour
new_friend.start()
tmp_dict = {'name': new_friend.fullname,
'favorite': is_favorite(number),
'number': new_friend.number}
# Add friend in listmodel
pyotherside.send('add_friend_list', tmp_dict)
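# Usage sketch (callers live outside this file; the number below is illustrative):
# add_friend("Alice", "+15551234567") creates and Bonjour-registers a Friend for a
# new number, then notifies the QML list model via pyotherside's 'add_friend_list'
# event.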
def load_favorite_friends():
favorites = get_favorites()
for number, name in favorites:
if number and name:
add_friend(name, number)
|
gpl-3.0
| 8,010,937,772,864,076,000
| 31.62249
| 102
| 0.577004
| false
| 3.806467
| false
| false
| false
|
qingshuimonk/STA663
|
vae/Vanilla_GAN.py
|
1
|
3540
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from time import gmtime, strftime
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
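# Note: the stddev above equals sqrt(2 / fan_in) (He-style scaling); many GAN
# tutorials ship this formula under the name "xavier_init".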
X = tf.placeholder(tf.float32, shape=[None, 784])
D_W1 = tf.Variable(xavier_init([784, 128]))
D_b1 = tf.Variable(tf.zeros(shape=[128]))
D_W2 = tf.Variable(xavier_init([128, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
Z = tf.placeholder(tf.float32, shape=[None, 100])
G_W1 = tf.Variable(xavier_init([100, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_Z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def generator(z):
G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_log_prob)
return G_prob
def discriminator(x):
D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
D_logit = tf.matmul(D_h1, D_W2) + D_b2
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def plot(samples):
fig = plt.figure(figsize=(8, 2))
gs = gridspec.GridSpec(2, 8)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)
# D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
# G_loss = -tf.reduce_mean(tf.log(D_fake))
# Alternative losses:
# -------------------
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
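# The sigmoid cross-entropy terms above implement the standard GAN objective:
# D minimizes -[log D(x) + log(1 - D(G(z)))], while G uses the non-saturating
# loss -log D(G(z)) by labeling fake logits as ones. This is numerically more
# stable than the commented-out formulation that takes tf.log of probabilities.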
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
mb_size = 128
Z_dim = 100
mnist = input_data.read_data_sets('/docs/MNIST_data', one_hot=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if not os.path.exists('/]data/GAN_pics/'):
os.makedirs('/]data/GAN_pics/')
i = 0
for it in range(100000):
if it == 99999:
samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})
fig = plot(samples)
plt.savefig('/]data/GAN_pics/{}.png'.format(strftime("%m-%d_%H:%M:%S", gmtime())), bbox_inches='tight')
i += 1
plt.close(fig)
X_mb, _ = mnist.train.next_batch(mb_size)
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
_, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})
if it % 1000 == 0:
print('Iter: {}'.format(it))
print('D loss: {:.4}'. format(D_loss_curr))
print('G_loss: {:.4}'.format(G_loss_curr))
print()
|
mit
| -5,731,070,445,213,390,000
| 27.556452
| 126
| 0.632768
| false
| 2.544932
| false
| false
| false
|
pytorch/fairseq
|
fairseq/models/nat/nat_crf_transformer.py
|
1
|
4378
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF
@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.crf_layer = DynamicCRF(
num_embedding=len(self.tgt_dict),
low_rank=args.crf_lowrank_approx,
beam_size=args.crf_beam_approx,
)
@property
def allow_ensemble(self):
return False
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--crf-lowrank-approx",
type=int,
help="the dimension of low-rank approximation of transition",
)
parser.add_argument(
"--crf-beam-approx",
type=int,
help="the beam size for apporixmating the normalizing factor",
)
parser.add_argument(
"--word-ins-loss-factor",
type=float,
help="weights on NAT loss used to co-training with CRF loss.",
)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)
# compute the log-likelihood of CRF
crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
"factor": self.args.word_ins_loss_factor,
},
"word_crf": {"loss": crf_nll},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
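    # The dict above is consumed by fairseq's NAT loss criterion: "word_ins" is the
    # token-level NLL (scaled by word_ins_loss_factor), "word_crf" carries the
    # length-normalized CRF negative log-likelihood, and "length" trains the
    # length predictor.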
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder and get emission scores
output_masks = output_tokens.ne(self.pad)
word_ins_out = self.decoder(
normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
# run viterbi decoding through CRF
_scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
base_architecture(args)
|
mit
| 8,179,743,690,239,201,000
| 35.181818
| 88
| 0.604386
| false
| 3.754717
| false
| false
| false
|
NicWayand/xray
|
xarray/core/combine.py
|
1
|
15635
|
import warnings
import pandas as pd
from . import utils
from .alignment import align
from .merge import merge
from .pycompat import iteritems, OrderedDict, basestring
from .variable import Variable, as_variable, Coordinate, concat as concat_vars
def concat(objs, dim=None, data_vars='all', coords='different',
compat='equals', positions=None, indexers=None, mode=None,
concat_over=None):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
    coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
            in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add join and ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError('must supply at least one object to concatenate')
if dim is None:
warnings.warn('the `dim` argument to `concat` will be required '
'in a future version of xarray; for now, setting it to '
"the old default of 'concat_dim'",
FutureWarning, stacklevel=2)
dim = 'concat_dims'
if indexers is not None: # pragma: nocover
warnings.warn('indexers has been renamed to positions; the alias '
'will be removed in a future version of xarray',
FutureWarning, stacklevel=2)
positions = indexers
if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError('can only concatenate xarray Dataset and DataArray '
'objects, got %s' % type(first_obj))
return f(objs, dim, data_vars, coords, compat, positions)
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
if isinstance(dim, basestring):
coord = None
elif not hasattr(dim, 'dims'):
# dim is not a DataArray or Coordinate
dim_name = getattr(dim, 'name', None)
if dim_name is None:
dim_name = 'concat_dim'
coord = Coordinate(dim_name, dim)
dim = dim_name
elif not hasattr(dim, 'name'):
coord = as_variable(dim).to_coord()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
def process_subset_opt(opt, subset):
if subset == 'coords':
subset_long_name = 'coordinates'
else:
subset_long_name = 'data variables'
if isinstance(opt, basestring):
if opt == 'different':
def differs(vname):
# simple helper function which compares a variable
# across all datasets and indicates whether that
# variable differs or not.
v = datasets[0].variables[vname]
return any(not ds.variables[vname].equals(v)
for ds in datasets[1:])
# all nonindexes that are not the same in each dataset
concat_new = set(k for k in getattr(datasets[0], subset)
if k not in concat_over and differs(k))
elif opt == 'all':
concat_new = (set(getattr(datasets[0], subset)) -
set(datasets[0].dims))
elif opt == 'minimal':
concat_new = set()
else:
raise ValueError("unexpected value for concat_%s: %s"
% (subset, opt))
else:
invalid_vars = [k for k in opt
if k not in getattr(datasets[0], subset)]
if invalid_vars:
raise ValueError('some variables in %s are not '
'%s on the first dataset: %s'
% (subset, subset_long_name, invalid_vars))
concat_new = set(opt)
return concat_new
concat_over = set()
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items()
if dim in v.dims)
concat_over.update(process_subset_opt(data_vars, 'data_vars'))
concat_over.update(process_subset_opt(coords, 'coords'))
if dim in datasets[0]:
concat_over.add(dim)
return concat_over
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset, as_dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
dim, coord = _calc_concat_dim_coord(dim)
datasets = [as_dataset(ds) for ds in datasets]
datasets = align(*datasets, join='outer', copy=False, exclude=[dim])
concat_over = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in iteritems(ds.variables):
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif (k in result_vars and k != dim and
not getattr(v, compat)(result_vars[k])):
verb = 'equal' if compat == 'equals' else compat
raise ValueError(
'variable %r not %s across datasets' % (k, verb))
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.expand_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset
for k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
if coord is not None:
        # add concat dimension last to ensure that it's in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(arrays, dim, data_vars, coords, compat,
positions):
arrays = list(arrays)
if data_vars != 'all':
raise ValueError('data_vars is not a valid argument when '
'concatenating DataArray objects')
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == 'identical':
raise ValueError('array names not identical')
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(datasets, dim, data_vars, coords, compat,
positions)
return arrays[0]._from_temp_dataset(ds, name)
def _auto_concat(datasets, dim=None):
if len(datasets) == 1:
return datasets[0]
else:
if dim is None:
ds0 = datasets[0]
ds1 = datasets[1]
concat_dims = set(ds0.dims)
if ds0.dims != ds1.dims:
dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())
concat_dims = set(i for i, _ in dim_tuples)
if len(concat_dims) > 1:
concat_dims = set(d for d in concat_dims
if not ds0[d].equals(ds1[d]))
if len(concat_dims) > 1:
raise ValueError('too many different dimensions to '
'concatenate: %s' % concat_dims)
elif len(concat_dims) == 0:
raise ValueError('cannot infer dimension to concatenate: '
'supply the ``concat_dim`` argument '
'explicitly')
dim, = concat_dims
return concat(datasets, dim=dim)
def auto_combine(datasets, concat_dim=None):
"""Attempt to auto-magically combine the given datasets into one.
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.
It does not concatenate along more than one dimension or align or sort data
under any circumstances. It will fail in complex cases, for which you
should use ``concat`` and ``merge`` explicitly.
When ``auto_combine`` may succeed:
    * You have N years of data and M data variables. Each combination of a
      distinct time period and set of data variables is saved in its own dataset.
Examples of when ``auto_combine`` fails:
    * In the above scenario, one file is missing, containing one year's data
      for one variable.
* In the most recent year, there is an additional data variable.
* Your data includes "time" and "station" dimensions, and each year's data
has a different set of stations.
Parameters
----------
datasets : sequence of xarray.Dataset
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xarray.concat`. You only need to provide this argument if the
dimension along which you want to concatenate is not a dimension in
the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
Returns
-------
combined : xarray.Dataset
See also
--------
concat
Dataset.merge
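    Examples
    --------
    A rough, illustrative sketch only; ``datasets`` stands for any list of
    Dataset objects laid out as described above::
        combined = auto_combine(datasets, concat_dim='time')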
"""
from toolz import itertoolz
grouped = itertoolz.groupby(lambda ds: tuple(sorted(ds.data_vars)),
datasets).values()
concatenated = [_auto_concat(ds, dim=concat_dim) for ds in grouped]
merged = merge(concatenated)
return merged
|
apache-2.0
| -1,998,947,403,007,847,000
| 40.144737
| 81
| 0.6055
| false
| 4.527947
| false
| false
| false
|
register-v1/PyBot
|
API/wisdom.py
|
1
|
6947
|
#!/usr/bin/python3
import random as r
def pick_random(value):
return value[r.randint(0, len(value)-1)]
abbreviation = ["TCP", "HTTP", "SDD", "RAM", "GB",
"CSS", "SSL", "AGP", "SQL", "FTP",
"PCI", "AI", "ADP", "RSS", "XML",
"EXE", "COM", "HDD", "THX", "SMTP",
"SMS", "USB", "PNG", "XSS", "SFTP",
"MITM"]
adjective = ["auxiliary", "primary", "back-end", "digital",
"open-source", "virtual", "cross-platform",
"redundant", "online", "haptic","multi-byte",
"bluetooth", "wireless", "1080p", "neural",
"optical", "solid state", "mobile"]
noun = ["driver", "protocol", "bandwidth", "panel", "microchip",
"program", "port", "card", "array", "interface", "system",
"sensor", "firewall", "hard drive", "pixel", "alarm",
"feed", "monitor", "application", "transmitter", "bus",
"circuit", "capacitor", "matrix", "socket", "database"]
verb = ["back up", "bypass", "hack", "override", "compress", "copy",
"navigate", "index", "connect", "generate", "quantify",
"calculate", "synthesize", "input", "transmit", "program",
"reboot", "parse", "analyze"]
ingverb = ["backing up", "bypassing", "hacking", "overriding",
"compressing", "copying", "navigating", "indexing",
"connecting", "generating", "quantifying", "calculating",
"synthesizing", "transmitting", "programming", "parsing",
"DDoSing", "scamming", "pwning", "rooting", "pigning",
"lurking"]
sentences = [
"If we {} the {}, we can get to the {} {} throught the {} {} {}!"
.format(
pick_random(verb),
pick_random(noun),
pick_random(abbreviation),
pick_random(noun),
pick_random(adjective),
pick_random(abbreviation),
pick_random(noun)
),
"We need to {} the {} {} {}!"
.format(
pick_random(verb),
pick_random(adjective),
pick_random(abbreviation),
pick_random(noun),
),
"Try to {} the {} {}, maybe it will {} the {} {}!"
.format(
pick_random(verb),
pick_random(abbreviation),
pick_random(noun),
pick_random(verb),
pick_random(adjective),
pick_random(noun),
),
"You can't {} the {} without {} the {} {} {}!"
.format(
pick_random(verb),
pick_random(noun),
pick_random(ingverb),
pick_random(adjective),
pick_random(abbreviation),
pick_random(noun),
)]
wise_sentences = [
"Practice makes perect!",
"Rome was not built in a day!",
"Shoot for the moon! Even if you miss, you'll land amongst the stars!",
"There is no such thing as a hacker that never made a mistake - Anon",
"Learning to code is like growing a tree, takes time - Anon",
"If you work for Microsoft or Apple, get a life - Anon",
"It is easier to build good habits than break bad ones - Forgotton",
"Education makes man unfit for a slave - Frederick Douglas",
"Life as a script kiddie is not a life worth living - Anon",
"A person who never made a mistake, never tried anything new - Einstein",
"If you're not willing to learn code, you don't deserve to know how to code - v1",
"Well being worth a god damn comes with an ability to not be a complete and total retard all the time ~ mickers"
]
urls = [
"https://www.youtube.com/watch?v=ZzfHjytDceU - Topics of Interest: Asyncio",
"https://www.youtube.com/watch?v=lyDLAutA88s - David Beazley: Builtin Superheros!",
"https://www.youtube.com/watch?v=E-1Y4kSsAFc - Fear and awaiting in Async",
"https://www.youtube.com/watch?v=OSGv2VnC0go - Idiomatic, Pythonic code",
"https://www.youtube.com/watch?v=N4mEzFDjqtA - Python in one video : Derek Banas",
"https://www.youtube.com/watch?v=XXmzYY03t64 - Basic SysAdmin's Guide to Python",
"https://www.youtube.com/watch?v=s1SkCYMnfbY - MulitProcessing with Python",
"https://www.youtube.com/watch?v=l_HBRhcgeuQ - Global Interpreter Lock",
"https://www.youtube.com/watch?v=ciNHn38EyRc - SQL Injections with exmaples",
"https://www.youtube.com/watch?v=GMGbOkKfZRo - Beginner SysAdmin with Python",
"https://www.youtube.com/watch?v=yHO8hdqzKw8 - Basic Python for the OS",
"https://www.youtube.com/watch?v=Thd8yoBou7k - SQL for Python Developers",
"https://www.youtube.com/watch?v=T1QEs3mdJoc - Cookie Grabbing Basics",
"https://www.youtube.com/watch?v=Pi9NpxAvYSs - Python Epiphanies"
]
courses = [
"AI! - https://www.youtube.com/watch?v=OGxgnH8y2NM&list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v"
]
topics = [
]
sciences = [
"https://www.youtube.com/watch?v=9Cd36WJ79z4 : Poetry of Reality",
"https://www.youtube.com/watch?v=1PT90dAA49Q : Wave of Reason",
"https://www.youtube.com/watch?v=zSgiXGELjbc : Glorious Dawn - Carl Sagan",
"https://www.youtube.com/watch?v=vioZf4TjoUI : The Cosmic Perspective",
"https://www.youtube.com/watch?v=hOLAGYmUQV0 : The Unbroken Thread"
]
music = [
"https://www.youtube.com/watch?v=X6t3CVafuec : YTCracker - Bazaar",
"https://www.youtube.com/watch?v=ieDBrlKnaAM : YTCracker - Starship",
"https://www.youtube.com/watch?v=2tRKH_BSsk0 : YTCracker - Social Engineering",
"https://www.youtube.com/watch?v=lIuEuJvKos4 : Astrix - Jungle Walk",
"https://www.youtube.com/watch?v=FoUWHfh733Y : Dual Core - All the things",
"https://www.youtube.com/watch?v=zeIjmvZZ_SQ : Zearle - Hackers and Crackers",
"https://www.youtube.com/watch?v=v1BXfMNfjFo : Deep Space House 061",
"https://www.youtube.com/watch?v=scPU1tTIg7Y : VOICIANS - Stranger",
"https://www.youtube.com/watch?v=8fIjqPqJYhA : VOICIANS - Wolves",
"https://www.youtube.com/watch?v=8EQzx-OzQmU : Wavve - 9 is God",
"https://www.youtube.com/watch?v=2GLGZQ4Y8SM : YTCracker - Crack",
"https://www.youtube.com/watch?v=YEP7rhDuWVE : YTCracker - Untouchable",
"https://www.youtube.com/watch?v=Sr8ILq1a_yw : Dual Core - 0x0A Commandments",
"https://www.youtube.com/watch?v=yc7_NHx6oHw : YTCracker - Packets",
"https://www.youtube.com/watch?v=YrRa6dEkzmk : Beat Hackers - Experience",
"https://www.youtube.com/watch?v=f04pC0_U5-I : Talamasca - Psychedelic Trance"
]
noob_quotes = [
"So if I want to write a maleware i must use a github? ~ EniGmis7",
"Windows wouldn't be popular if microsoft didn't know what they were doing ~ leaxyz",
"how hax facebook? ~ Virtually every noob ever.",
"I'm a hacker. Can someone help me reverse an md5? ~ MoHD"
]
# r.randint is inclusive on both ends, so indexing with len(...) can raise an
# IndexError; reuse pick_random() (which already uses len-1) for the random picks.
def noob():
    return pick_random(noob_quotes)
def troll():
    return pick_random(sentences)
def science(): return pick_random(sciences)
def science_song(number): data = sciences[number] ; return data
def wisdom(): return pick_random(wise_sentences)
def urlpls(): return pick_random(urls)
def url(num): data = urls[num] ; return data
def song(number): data = music[number] ; return data
def quote(number): data = wise_sentences[number] ; return data
def songpls(): return pick_random(music)
def randomtopic(): return pick_random(topics)  # note: topics is currently empty
def randomcourse(): return pick_random(courses)
|
gpl-3.0
| -2,541,187,709,382,089,700
| 37.17033
| 112
| 0.673816
| false
| 2.718982
| false
| false
| false
|
minicole/elpolitico
|
elpolitico/elpolitico/MyState.py
|
1
|
2660
|
__author__ = 'Nicole'
import json
import random
import time
GREEN = 'green'
CONSERVATIVE = 'conservative'
LIBERAL = 'liberal'
LIBERTARIAN = 'libertarian'
MAX_CACHED_POINTS = 400
STATES = [GREEN, CONSERVATIVE, LIBERAL, LIBERTARIAN]
class MyStates:
def __init__(self):
self.currentStates = [CurrentStateOfParty(GREEN), CurrentStateOfParty(CONSERVATIVE), CurrentStateOfParty(LIBERAL), CurrentStateOfParty(LIBERTARIAN)]
self.newPoints = list()
self.existingPoints = list()
self.totalPoints = 0
def passStateToFrontEnd(self):
pointsToPass = list()
for point in self.newPoints:
self.existingPoints.append(point)
# serialize points:
pointsToPass.append(json.dumps(point.newPoint.exportToFrontEnd()))
# empty the old new points:
self.newPoints = list()
return {'newPoints': pointsToPass, 'timestamp': time.time()}
    def getState(self, party):
        # look up the per-party state; this helper was missing but is used below
        for state in self.currentStates:
            if state.party == party:
                return state
        return None
    def addNewPoint(self, point):
        self.newPoints.append(point)
        state = self.getState(point.party)
        state.addNewPoint(point)  # keep the per-party aggregates in sync
        self.totalPoints += 1
        state.percentTotal = state.totalPoints / self.totalPoints
        if self.totalPoints >= MAX_CACHED_POINTS:
            self.existingPoints.pop(1)
            self.totalPoints -= 1
class CurrentStateOfParty:
def __init__(self, party):
self.party = party
self.percentTotal = 0
self.certainty = 0
self.positivity = 0
self.totalPoints = 0
def addNewPoint(self, point):
self.certainty = (self.certainty * self.totalPoints + point.newPoint.tendency) / (self.totalPoints + 1)
self.positivity = (self.positivity * self.totalPoints + point.positivity) / (self.totalPoints + 1)
self.totalPoints += 1
def exportToFrontEnd(self):
return {'party': self.party, 'percentTotal': self.percentTotal, 'certainty': self.certainty, 'positivity': self.positivity}
def exportRandomness(self):
return {'party': "conservative", 'percentTotal': random.randint(-60,60), 'certainty': random.randint(-60,60), 'positivity': random.randint(-60,60)}
class StateOfPoint:
def __init__(self):
self.newPoint = NewPoint()
self.positivity = 0
class NewPoint:
def __init__(self):
self.tendency = 0
self.lat = 0
self.long = 0
self.party = None
def exportToFrontEnd(self):
return {"lat": self.lat, "long": self.long, "tendency": self.tendency, "party": self.party}
def exportRandomness(self):
return {"lat": random.randint(-60,60), "long": random.randint(-60,60), "tendency": random.randint(-60,60), "party": random.randint(-60,60)}
|
mit
| -5,317,939,568,438,419,000
| 32.2625
| 156
| 0.645865
| false
| 3.546667
| false
| false
| false
|
fullmooninu/messy
|
testIfCanCheat.py
|
1
|
1065
|
import random
# Monte Carlo comparison of two strategies for guessing which of two distinct
# random numbers in 1..n is larger. Strategy 1 guesses blindly (about 50%).
# Strategy 2 draws a third random number as a threshold and switches its guess
# to the hidden number whenever the threshold exceeds the revealed one, which
# scores better than chance.
n = 10
print("strategy 1")
s1_points = 0
for i in range(100000):
num1 = random.randint(1,n)
num2 = num1
while num2==num1:
num2 = random.randint(1,n)
choice = random.randint(1,2)
if choice == 1:
if num1 > num2:
s1_points = s1_points +1
if choice == 2:
if num2 > num1:
s1_points = s1_points +1
print("points",s1_points)
print("strategy 2")
s2_points = 0
for i in range(100000):
num1 = random.randint(1,n)
num2 = num1
while num2==num1:
num2 = random.randint(1,n)
num3 = random.randint(1,n)
choice = random.randint(1,2)
if choice == 1:
if num3 > num1:
if num2 > num1:
s2_points = s2_points + 1
elif num1 > num2:
s2_points = s2_points +1
if choice == 2:
if num3 > num2:
if num1 > num2:
s2_points = s2_points + 1
elif num2 > num1:
s2_points = s2_points + 1
print("points",s2_points)
|
unlicense
| 6,161,339,017,714,504,000
| 22.666667
| 41
| 0.501408
| false
| 3.09593
| false
| false
| false
|
Florianjw/NiceCharts
|
nicechart.py
|
1
|
22515
|
#!/usr/bin/env python
# nicechart.py
#
# Copyright 2011
#
# Christoph Sterz
# Florian Weber
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# These two lines are only needed if you don't put the script directly into
# the installation directory
import sys
#sys.path.append('/usr/share/inkscape/extensions')
# We will use the inkex module with the predefined Effect base class.
import inkex
# The simplestyle module provides functions for style parsing.
from simplestyle import *
import math, re, nicechart_colors as nc_colors
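# Rough invocation sketch (not from the upstream documentation): when run
# outside of Inkscape, inkex-based extensions take their options plus an SVG
# file on the command line, along the lines of
#   python nicechart.py --input_type='"direct_input"' \
#       --what='A:10;B:20;C:30' --type=pie drawing.svg > out.svg
# The extra quotes around direct_input mirror the string comparison in
# effect(); the values and file names here are made up for illustration.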
class NiceChart(inkex.Effect):
"""
    Inkscape effect extension that draws bar, pie and stacked-bar charts
    on a new layer, from CSV files or directly entered values.
"""
def __init__(self):
"""
Constructor.
Defines the "--what" option of a script.
"""
# Call the base class constructor.
inkex.Effect.__init__(self)
# Define string option "--what" with "-w" shortcut and default chart values.
self.OptionParser.add_option('-w', '--what', action = 'store',
type = 'string', dest = 'what', default = '22,11,67',
help = 'Chart Values')
# Define string option "--type" with "-t" shortcut.
self.OptionParser.add_option("-t", "--type", action="store",
type="string", dest="type", default='',
help="Chart Type")
# Define bool option "--blur" with "-b" shortcut.
self.OptionParser.add_option("-b", "--blur", action="store",
type="inkbool", dest="blur", default='True',
help="Blur Type")
# Define string option "--file" with "-f" shortcut.
self.OptionParser.add_option("-f", "--filename", action="store",
type="string", dest="filename", default='',
help="Name of File")
# Define string option "--input_type" with "-i" shortcut.
self.OptionParser.add_option("-i", "--input_type", action="store",
type="string", dest="input_type", default='file',
help="Chart Type")
# Define string option "--delimiter" with "-d" shortcut.
self.OptionParser.add_option("-d", "--delimiter", action="store",
type="string", dest="csv_delimiter", default=';',
help="delimiter")
# Define string option "--colors" with "-c" shortcut.
self.OptionParser.add_option("-c", "--colors", action="store",
type="string", dest="colors", default='default',
help="color-scheme")
# Define string option "--colors_override"
self.OptionParser.add_option("", "--colors_override", action="store",
type="string", dest="colors_override", default='',
help="color-scheme-override")
self.OptionParser.add_option("", "--reverse_colors", action="store",
type="inkbool", dest="reverse_colors", default='False',
help="reverse color-scheme")
self.OptionParser.add_option("-k", "--col_key", action="store",
type="int", dest="col_key", default='0',
help="column that contains the keys")
self.OptionParser.add_option("-v", "--col_val", action="store",
type="int", dest="col_val", default='1',
help="column that contains the values")
self.OptionParser.add_option("-r", "--rotate", action="store",
type="inkbool", dest="rotate", default='False',
help="Draw barchart horizontally")
self.OptionParser.add_option("-W", "--bar-width", action="store",
type="int", dest="bar_width", default='10',
help="width of bars")
self.OptionParser.add_option("-p", "--pie-radius", action="store",
type="int", dest="pie_radius", default='100',
help="radius of pie-charts")
self.OptionParser.add_option("-H", "--bar-height", action="store",
type="int", dest="bar_height", default='100',
help="height of bars")
self.OptionParser.add_option("-O", "--bar-offset", action="store",
type="int", dest="bar_offset", default='5',
help="distance between bars")
self.OptionParser.add_option("", "--stroke-width", action="store",
type="int", dest="stroke_width", default='2')
self.OptionParser.add_option("-o", "--text-offset", action="store",
type="int", dest="text_offset", default='5',
help="distance between bar and descriptions")
self.OptionParser.add_option("-F", "--font", action="store",
type="string", dest="font", default='sans-serif',
help="font of description")
self.OptionParser.add_option("-S", "--font-size", action="store",
type="int", dest="font_size", default='10',
help="font size of description")
self.OptionParser.add_option("-C", "--font-color", action="store",
type="string", dest="font_color", default='black',
help="font color of description")
#Dummy:
self.OptionParser.add_option("","--input_sections")
self.OptionParser.add_option("-V", "--show_values", action="store",
type="inkbool", dest="show_values", default='False',
help="Show values in chart")
def effect(self):
"""
Effect behaviour.
        Overrides the base class method and inserts a nice-looking chart into the SVG document.
"""
        # Get script's "--what" option value and process the data.
        # (Admittedly, the input-type handling below is a bit of magic.)
what = self.options.what
keys=[]
values=[]
orig_values=[]
keys_present=True
pie_abs=False
cnt=0
csv_file_name=self.options.filename
csv_delimiter=self.options.csv_delimiter
input_type=self.options.input_type
col_key=self.options.col_key
col_val=self.options.col_val
show_values=self.options.show_values
if(input_type=="\"file\""):
csv_file=open(csv_file_name,"r")
for line in csv_file:
value=line.split(csv_delimiter)
if(len(value)>=1): #make sure that there is at least one value (someone may want to use it as description)
keys.append(value[col_key])
values.append(float(value[col_val]))
csv_file.close()
elif(input_type=="\"direct_input\""):
what=re.findall("([A-Z|a-z|0-9]+:[0-9]+\.?[0-9]*)",what)
for value in what:
value=value.split(":")
keys.append(value[0])
values.append(float(value[1]))
# Get script's "--type" option value.
charttype=self.options.type
if(charttype=="pie_abs"):
pie_abs=True
charttype="pie"
# Get access to main SVG document element and get its dimensions.
svg = self.document.getroot()
        # Get the page attributes:
width = self.getUnittouu(svg.get('width'))
height = self.getUnittouu(svg.attrib['height'])
# Create a new layer.
layer = inkex.etree.SubElement(svg, 'g')
layer.set(inkex.addNS('label', 'inkscape'), 'Chart-Layer: %s' % (what))
layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
# Check if Blur should be drawn:
draw_blur=self.options.blur
#draw_blur=False
# Set Default Colors
self.options.colors_override.strip()
if (len(self.options.colors_override)>0):
Colors=self.options.colors_override
else:
Colors=self.options.colors
if(Colors[0].isalpha()):
Colors=nc_colors.get_color_scheme(Colors)
else:
Colors=re.findall("(#[0-9a-fA-F]{6})",Colors)
#to be sure we create a fallback:
if(len(Colors)==0):
Colors=nc_colors.get_color_scheme()
color_count=len(Colors)
if(self.options.reverse_colors):
Colors.reverse()
#Those values should be self-explaining:
bar_height=self.options.bar_height
bar_width=self.options.bar_width
bar_offset=self.options.bar_offset
#offset of the description in stacked-bar-charts:
#stacked_bar_text_offset=self.options.stacked_bar_text_offset
text_offset=self.options.text_offset
#get font
font=self.options.font
font_size=self.options.font_size
font_color=self.options.font_color
#get rotation
rotate = self.options.rotate
pie_radius=self.options.pie_radius
stroke_width=self.options.stroke_width
if(charttype=="bar"):
#########
###BAR###
#########
#iterate all values, use offset to draw the bars in different places
offset=0
color=0
# Normalize the bars to the largest value
try:
value_max=max(values)
except ValueError:
value_max=0.0
for x in range(len(values)):
orig_values.append(values[x])
values[x]=(values[x]/value_max)*bar_height
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
# Draw Single bars with their shadows
for value in values:
#draw blur, if it is wanted
if(draw_blur):
# Create shadow element
shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
# Set chart position to center of document. Make it horizontal or vertical
if(not rotate):
shadow.set('x', str(width / 2 + offset +1))
shadow.set('y', str(height / 2 - int(value)+1))
else:
shadow.set('y', str(width / 2 + offset +1))
shadow.set('x', str(height / 2 +1))
# Set shadow properties
if(not rotate):
shadow.set("width", str(bar_width))
shadow.set("height", str(int(value)))
else:
shadow.set("height", str(bar_width))
shadow.set("width", str(int(value)))
# Set shadow blur (connect to filter object in xml path)
shadow.set("style","filter:url(#filter)")
# Create rectangle element
#shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
rect = inkex.etree.Element(inkex.addNS('rect','svg'))
# Set chart position to center of document.
if(not rotate):
rect.set('x', str(width/2+offset))
rect.set('y', str(height/2-int(value)))
else:
rect.set('y', str(width/2+offset))
rect.set('x', str(height/2))
# Set rectangle properties
if(not rotate):
rect.set("width", str(bar_width))
rect.set("height", str(int(value)))
else:
rect.set("height", str(bar_width))
rect.set("width", str(int(value)))
rect.set("style","fill:"+Colors[color%color_count])
# Set shadow blur (connect to filter object in xml path)
if(draw_blur):
shadow.set("style","filter:url(#filter)")
# If keys are given create text elements
if(keys_present):
text = inkex.etree.Element(inkex.addNS('text','svg'))
if(not rotate): #=vertical
text.set("transform","matrix(0,-1,1,0,0,0)")
#y after rotation:
text.set("x", "-"+str(height/2+text_offset))
#x after rotation:
text.set("y", str(width/2+offset+bar_width/2+font_size/3))
else: #=horizontal
text.set("y", str(width/2+offset+bar_width/2+font_size/3))
text.set("x", str(height/2-text_offset))
text.set("style","font-size:"+str(font_size)\
+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"\
+font+";-inkscape-font-specification:Bitstream Charter;text-align:end;text-anchor:end;fill:"\
+font_color)
text.text=keys[cnt]
#cnt=cnt+1
# Increase Offset and Color
#offset=offset+bar_width+bar_offset
color=(color+1)%8
# Connect elements together.
if(draw_blur):
layer.append(shadow)
layer.append(rect)
if(keys_present):
layer.append(text)
if(show_values):
vtext = inkex.etree.Element(inkex.addNS('text','svg'))
if(not rotate): #=vertical
vtext.set("transform","matrix(0,-1,1,0,0,0)")
#y after rotation:
vtext.set("x", "-"+str(height/2+text_offset-value-text_offset-text_offset))
#x after rotation:
vtext.set("y", str(width/2+offset+bar_width/2+font_size/3))
else: #=horizontal
vtext.set("y", str(width/2+offset+bar_width/2+font_size/3))
vtext.set("x", str(height/2-text_offset+value+text_offset+text_offset))
vtext.set("style","font-size:"+str(font_size)\
+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"\
+font+";-inkscape-font-specification:Bitstream Charter;text-align:start;text-anchor:start;fill:"\
+font_color)
vtext.text=str(int(orig_values[cnt]))
layer.append(vtext)
cnt=cnt+1
offset=offset+bar_width+bar_offset
elif(charttype=="pie"):
#########
###PIE###
#########
# Iterate all values to draw the different slices
color=0
# Set Default Colors
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
# Add a grey background circle
background=inkex.etree.Element(inkex.addNS("circle","svg"))
background.set("cx", str(width/2))
background.set("cy", str(height/2))
background.set("r", str(pie_radius))
if pie_abs:
background.set("style","stroke:#ececec;fill:#f9f9f9")
else:
background.set("style","fill:#aaaaaa;stroke:none")
layer.append(background)
#create value sum in order to divide the slices
try:
valuesum=sum(values)
except ValueError:
valuesum=0
if pie_abs:
valuesum=100
# Set an offsetangle
offset=0
# Draw single slices with their shadow
for value in values:
# Calculate the PI-angles for start and end
angle=(2*3.141592)/valuesum*float(value)
# Create the shadow first (if it should be created):
if(draw_blur):
shadow=inkex.etree.Element(inkex.addNS("path","svg"))
shadow.set(inkex.addNS('type', 'sodipodi'), 'arc')
shadow.set(inkex.addNS('cx', 'sodipodi'), str(width/2))
shadow.set(inkex.addNS('cy', 'sodipodi'), str(height/2))
shadow.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))
shadow.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))
shadow.set(inkex.addNS('start', 'sodipodi'), str(offset))
shadow.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))
shadow.set("style","filter:url(#filter);fill:#000000")
#then add the slice
pieslice=inkex.etree.Element(inkex.addNS("path","svg"))
pieslice.set(inkex.addNS('type', 'sodipodi'), 'arc')
pieslice.set(inkex.addNS('cx', 'sodipodi'), str(width/2))
pieslice.set(inkex.addNS('cy', 'sodipodi'), str(height/2))
pieslice.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))
pieslice.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))
pieslice.set(inkex.addNS('start', 'sodipodi'), str(offset))
pieslice.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))
pieslice.set("style","fill:"+Colors[color%color_count]+";stroke:none;fill-opacity:1")
#If text is given, draw short paths and add the text
if(keys_present):
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width/2)+pie_radius*math.cos(angle/2+offset))+","+str((height/2)+pie_radius*math.sin(angle/2+offset))+" "+str((text_offset-2)*math.cos(angle/2+offset))+","+str((text_offset-2)*math.sin(angle/2+offset)))
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str((width/2)+(pie_radius+text_offset)*math.cos(angle/2+offset)))
text.set("y", str((height/2)+(pie_radius+text_offset)*math.sin(angle/2+offset)+font_size/3))
textstyle="font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color
#check if it is right or left of the Pie
if(math.cos(angle/2+offset)>0):
text.set("style",textstyle)
else:
text.set("style",textstyle+";text-align:end;text-anchor:end")
text.text=keys[cnt]
if show_values:
text.text=text.text+"("+str(values[cnt])
if pie_abs:
text.text=text.text+" %"
text.text=text.text+")"
cnt=cnt+1
layer.append(text)
#increase the rotation-offset and the colorcycle-position
offset=offset+angle
color=(color+1)%8
#append the objects to the extension-layer
if(draw_blur):
layer.append(shadow)
layer.append(pieslice)
elif(charttype=="stbar"):
#################
###STACKED BAR###
#################
# Iterate all values to draw the different slices
color=0
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
#create value sum in order to divide the bars
try:
valuesum=sum(values)
except ValueError:
valuesum=0.0
# Init offset
offset=0
i=len(values)-1 #loopcounter
# Draw Single bars with their shadows
for value in values:
# Calculate the individual heights normalized on 100units
normedvalue=(bar_height/valuesum)*float(value)
if(draw_blur):
# Create rectangle element
shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
# Set chart position to center of document.
if(not rotate):
shadow.set('x', str(width / 2 + 1))
shadow.set('y', str(height / 2 - offset - (normedvalue)+1))
else:
shadow.set('x', str(width / 2 + 1 + offset))
shadow.set('y', str(height / 2 +1))
# Set rectangle properties
if(not rotate):
shadow.set("width",str(bar_width))
shadow.set("height", str((normedvalue)))
else:
shadow.set("width",str((normedvalue)))
shadow.set("height", str(bar_width))
# Set shadow blur (connect to filter object in xml path)
shadow.set("style","filter:url(#filter)")
# Create rectangle element
rect = inkex.etree.Element(inkex.addNS('rect','svg'))
# Set chart position to center of document.
if( not rotate ):
rect.set('x', str(width / 2 ))
rect.set('y', str(height / 2 - offset - (normedvalue)))
else:
rect.set('x', str(width / 2 + offset ))
rect.set('y', str(height / 2 ))
# Set rectangle properties
if( not rotate ):
rect.set("width", str(bar_width))
rect.set("height", str((normedvalue)))
else:
rect.set("height", str(bar_width))
rect.set("width", str((normedvalue)))
rect.set("style","fill:"+Colors[color%color_count])
#If text is given, draw short paths and add the text
if(keys_present):
if(not rotate):
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width+bar_width)/2)+","+str(height / 2 - offset - (normedvalue / 2))+" "+str(bar_width/2+text_offset)+",0")
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str(width/2+bar_width+text_offset+1))
text.set("y", str(height / 2 - offset + font_size/3 - (normedvalue / 2)))
text.set("style","font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color)
text.text=keys[cnt]
cnt=cnt+1
layer.append(text)
else:
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width)/2+offset+normedvalue/2)+","
+str(height / 2 + bar_width/2)
+" 0,"+str(bar_width/2+(font_size*i)+text_offset)) #line
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str((width)/2+offset+normedvalue/2-font_size/3))
text.set("y", str((height/2)+bar_width+(font_size*(i+1))+text_offset ))
text.set("style","font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color)
text.text=keys[color]
layer.append(text)
# Increase Offset and Color
offset=offset+normedvalue
color=(color+1)%8
# Connect elements together.
if(draw_blur):
layer.append(shadow)
layer.append(rect)
i-=1 #loopcounter
def getUnittouu(self, param):
#compatibility wrapper
try:
return inkex.unittouu(param)
except AttributeError:
return self.unittouu(param)
# Create effect instance and apply it.
effect = NiceChart()
effect.affect()
|
gpl-3.0
| 8,903,890,283,883,620,000
| 33.852941
| 230
| 0.639129
| false
| 3.018906
| false
| false
| false
|
nirs/hpy
|
hpy/htokenize.py
|
1
|
12383
|
"""Tokenization help for Python programs.
This is the tokenize module from Python 2.4.3, with minor modifications needed
to support Hebrew tokens.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found.
@license: Python license.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
"generate_tokens", "NL"]
del x
del token
from hpy import hebrew
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = ur'[a-zA-Z_%s]\w*' % ''.join(hebrew.alpha)
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
pseudoprog = re.compile(PseudoToken, re.U)
tokenprog = re.compile(Token, re.U)
single3prog = re.compile(Single3, re.U)
double3prog = re.compile(Double3, re.U)
endprogs = {"'": re.compile(Single, re.U), '"': re.compile(Double, re.U),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"' ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
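    A rough usage sketch of the callback style (the function and file names
    here are illustrative only)::
        def eater(toktype, tokstring, start, end, line):
            print tok_name[toktype], repr(tokstring)
        tokenize(open('example.py').readline, eater)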
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string.
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
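    A rough usage sketch (the file name is illustrative only)::
        f = open('example.py')
        for toktype, tokstring, start, end, line in generate_tokens(f.readline):
            print tok_name[toktype], repr(tokstring)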
"""
lnum = parenlev = continued = 0
namechars = string.ascii_letters + '_' + ''.join(hebrew.alpha)
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
line = readline()
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level")
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield (parenlev > 0 and NL or NEWLINE,
token, spos, epos, line)
elif initial == '#':
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
|
gpl-2.0
| -847,647,740,305,693,600
| 39.074434
| 78
| 0.515465
| false
| 3.779915
| false
| false
| false
|
thp/backuppurge
|
lib/backuppurge/__init__.py
|
1
|
7956
|
# -*- coding: utf-8 -*-
#
# backuppurge: Selectively purge daily full backups
#
# Copyright (c) 2013, 2015 Thomas Perl <m@thp.io>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Selectively purge daily full backups
Lists files that should be purged in a backup strategy where daily backups
are kept for *DAYS* days, monthly backups for *MONTHS* months and yearly backups
for *YEARS* years. Monthly and yearly backups are always the oldest possible
daily backup (e.g. first of month and first of year that is available).
Files are expected to have their date embedded as ``YYYY-MM-DD`` somewhere in
the filename, e.g. ``homedir-2013-03-31.tgz``
For monthly and yearly backups, the first day available will be kept (e.g.
January 1st for yearly, but if that is not available, January 2nd will be
kept, etc..).
This program can be used together with xargs(1) from GNU findutils::
backuppurge --print0 /var/backups/ | xargs -r -0 rm
Only files directly in the specified **DIRECTORY** will be searched (in the
above example, ``/var/backups/homedir-2013-03-31.tgz`` will be considered,
but not ``/var/backups/etc/etc-2013-03-31.tgz``). This prevents accidental
deletion of files. If --include-directories (-D) is used, directories directly
below the path will be included in the search (e.g. the directory
``/var/backups/etc-2015-07-24/`` will be included in the purge search).
This script assumes daily backups are FULL backups, not incremental. For
example, a full daily backup of your ``/etc`` can be created by adding
(``crontab -e``) a command like the following to your crontab(5) file::
tar czf /var/backups/etc/etc-$(date +%F).tgz /etc
"""
from __future__ import print_function
import logging
import warnings
import datetime
import re
import os
__author__ = 'Thomas Perl <m@thp.io>'
__license__ = 'Simplified BSD License'
__url__ = 'http://thp.io/2013/backuppurge/'
__version__ = '1.0.4'
class MixedFilenames(BaseException):
"""
Raised when the list of filenames passed to PurgeList don't have
the same prefix and postfix (before/after the date).
"""
pass
class NoBackupsFound(Warning):
"""
Warning raised when no backup files (with a date) are found.
"""
pass
warnings.simplefilter('always', NoBackupsFound)
def find_backups(directory, include_directories):
"""
Find backup files in directory
"""
return filter(lambda f: (include_directories and os.path.isdir(f)) or os.path.isfile(f),
map(lambda filename: os.path.join(directory, filename), os.listdir(directory)))
class PurgeList:
def __init__(self, filenames, today, prefix):
self.logger = logging.getLogger(self.__class__.__name__)
self.filenames = filenames
self.today = today
self.prefix = prefix
# Check prefix of files (before date), bail out if not all equal
self.check_file_list()
# By default, purge everything
self.purge = set(self.filenames)
def check_file_list(self):
regex = re.compile(r'^(.*)(\d{4}-\d{2}-\d{2})(.*)$')
# Remove all filenames without a date string in them
self.filenames = list(filter(regex.match, self.filenames))
if self.prefix is not None:
self.filenames = [filename for filename in self.filenames
if regex.match(filename).group(1) == self.prefix]
if len(self.filenames) == 0:
warnings.warn('File list is empty', NoBackupsFound)
return
prefixes, _, postfixes = map(set, zip(*[regex.match(filename).groups()
for filename in self.filenames]))
if len(prefixes) != 1:
raise MixedFilenames('Non-unique prefixes: {0}'.format(prefixes))
if len(postfixes) != 1:
raise MixedFilenames('Non-unique postfixes: {0}'.format(postfixes))
def keep(self, filename, kind):
"""Mark filename to be kept"""
if filename is None:
return
if filename in self.purge:
self.logger.info('Keeping file for %s: %s', kind, filename)
self.purge.remove(filename)
else:
self.logger.debug('File for %s already kept: %s', kind, filename)
def get_all(self, year, month=None, day=None):
"""Get all backups for a specific year"""
month_re = r'{0:02d}'.format(month) if month else r'\d{2}'
day_re = r'{0:02d}'.format(day) if day else r'\d{2}'
regex = re.compile(r'{0:04d}-{1:s}-{2:s}'.format(year, month_re, day_re))
return sorted(filter(regex.search, self.filenames))
def get_first(self, year, month=None, day=None):
"""Get first backup for a specific year, month or day
get_first(2013) -> First available backup in 2013
get_first(2013, 3) -> First available backup in March 2013
get_first(2013, 3, 31) -> First available backup for March 31st 2013
"""
matches = self.get_all(year, month, day)
if matches:
return matches[0]
return None
def recent_days(self, count):
day = self.today
while count > 0:
yield (day.year, day.month, day.day)
day -= datetime.timedelta(days=1)
count -= 1
def recent_months(self, count):
month = (self.today.year, self.today.month)
while count > 0:
yield month
if month[1] == 1:
month = (month[0]-1, 12)
else:
month = (month[0], month[1]-1)
count -= 1
def recent_years(self, count):
year = self.today.year
while count > 0:
yield year
year -= 1
count -= 1
def keep_daily(self, days):
for year, month, day in self.recent_days(days):
self.keep(self.get_first(year, month, day), 'daily')
def keep_monthly(self, months):
for year, month in self.recent_months(months):
self.keep(self.get_first(year, month),
'monthly ({0}-{1:02d})'.format(year, month))
def keep_yearly(self, years):
for year in self.recent_years(years):
self.keep(self.get_first(year), 'yearly ({0})'.format(year))
def get_filenames(self):
return self.purge
def main(directory, days, months, years, separator, include_directories, prefix):
today = datetime.date.today()
filenames = find_backups(directory, include_directories)
purge_list = PurgeList(filenames, today, prefix)
purge_list.keep_daily(days)
purge_list.keep_monthly(months)
purge_list.keep_yearly(years)
purge_files = purge_list.get_filenames()
if purge_files:
print(separator.join(purge_files), end=separator)
|
bsd-2-clause
| -5,817,171,671,719,729,000
| 33.894737
| 97
| 0.653343
| false
| 3.813998
| false
| false
| false
|
goblinhack/MundusMeus
|
python/things/weapon.py
|
1
|
13288
|
import tp
import mm
def thing_init(t):
return
def weapon_init(name, short_name, long_name, damage, is_double_handed=False):
x = tp.Tp(name)
x.set_long_name(long_name)
x.set_short_name(short_name)
x.set_is_weapon(True)
x.set_z_depth(mm.Z_DEPTH_TREASURE)
x.set_damage(damage)
x.set_is_double_handed(is_double_handed)
x.set_tile(tile=name)
x.thing_init = thing_init
def init():
weapon_init(name="axe1.1",
short_name="Hand Axe",
long_name="Very handy axe. Useful for all axeing occasions.",
damage="1d4",
is_double_handed=True
)
weapon_init(name="axe1.2",
short_name="Battle Axe",
long_name="Dont battle without this axe.",
damage="1d6"
)
weapon_init(name="axe1.3",
short_name="Greataxe",
long_name="This axe is great indeed. " +
"Not the greatest, but still pretty great.",
damage="1d8+1",
is_double_handed=True
)
weapon_init(name="axe1.4",
short_name="Even Greater Axe",
long_name="The greatest of great great axes.",
damage="1d10+2",
is_double_handed=True
)
weapon_init(name="axe1.5",
short_name="Masterwork Axe",
long_name="Finest craftwork axe. Definately not made by orcs.",
damage="1d12"
)
weapon_init(name="axe1.6",
short_name="Diamond Axe",
long_name="Diamond encrusted bladed axe. " +
"Glistens in the dark.",
damage="1d14"
)
weapon_init(name="axe1.7",
short_name="Blood Axe",
long_name="This axe yearns to be whetted with blood. "
"Hopefully not your own.",
damage="2d6+2"
)
weapon_init(name="axe1.9",
short_name="Cleaver Axe",
long_name="An edge so sharp, " +
"you might lose your head over it.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="ball_chain1.1",
short_name="Flail",
long_name="Don't flail around with this flail.",
damage="1d4"
)
weapon_init(name="ball_chain1.2",
short_name="Masterwork Flail",
long_name="If you need to flail, this is the weapon for you.",
damage="1d6"
)
weapon_init(name="ball_chain1.3",
short_name="Diamond Flail",
long_name="Flailing with a sharp edge.",
damage="1d12"
)
weapon_init(name="bow1.1",
short_name="Bow",
long_name="Standard issue bow. Wooden. " +
"Bowish. What more can be said?",
damage="1d6",
is_double_handed=True
)
weapon_init(name="bow1.2",
short_name="Longishbow",
long_name="Not quite a long bow, but long enough.",
damage="1d8",
is_double_handed=True
)
weapon_init(name="bow1.3",
short_name="Metal Longbow",
long_name="A tough bow for a tough individual.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="bow1.4",
short_name="Bowmaster",
long_name="The bow of masters. The bow master.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="bow1.5",
short_name="Masterwork Bow",
long_name="Beautiful oaken bow with inlaid markings " +
"and a silver handle. Probably fires well too.",
damage="1d12",
is_double_handed=True
)
weapon_init(name="bow1.6",
short_name="Crossbow",
long_name="If your angry and have targets, " +
"this is the bow for you. " +
"No archery training required.",
damage="1d6",
is_double_handed=True
)
weapon_init(name="bow1.7",
short_name="Metal cross",
long_name="Resounding thuds will come from " +
"this device. And screams.",
damage="1d8",
is_double_handed=True
)
weapon_init(name="bow1.8",
short_name="Masterwork cross",
long_name="It's a weapon of pointy death, " +
"but it's beautifully made. Shiny.",
damage="1d6",
is_double_handed=True
)
weapon_init(name="mace1.1",
short_name="Mace",
long_name="No powder here, this is a serious mace, " +
"made for resounding head impacts.",
damage="1d8"
)
weapon_init(name="mace1.2",
short_name="War Mace",
long_name="If you need to go to war, you need this mace.",
damage="1d10"
)
weapon_init(name="quiver1.1",
short_name="Arrows",
long_name="Standard issue ACME arrows.",
damage="1d6"
)
weapon_init(name="quiver1.2",
short_name="Flame Arrows",
long_name="Arrows that will ingite on use. No returns.",
damage="1d6"
)
weapon_init(name="quiver1.3",
short_name="Energy Arrows",
long_name="Arrows that transform into beams of " +
"energy on use. No kidding.",
damage="1d6"
)
weapon_init(name="quiver1.4",
short_name="Acid Arrows",
long_name="Don't touch the end of these arrows. " +
"And don't try and taste them either.",
damage="1d6"
)
weapon_init(name="stick1.1",
short_name="Just a stick",
long_name="Sticky the stick.",
damage="1d4"
)
weapon_init(name="stick1.2",
short_name="Flame Stick",
long_name="Sticky the stick, burning version.",
damage="1d4"
)
weapon_init(name="stick1.3",
short_name="Magic Stick",
long_name="It's a magically enhanced stick... " +
"Who would believe that?",
damage="1d4+1"
)
weapon_init(name="stick2.1",
short_name="Stick V2",
long_name="Sticky the stick, mildly improved version.",
damage="1d4+2"
)
weapon_init(name="stick2.2",
short_name="Hooked Stick",
long_name="Great stick for inflicting a bit of extra " +
"damage than your common stick.",
damage="1d4+3"
)
weapon_init(name="stick2.3",
short_name="Gnarly Stick",
long_name="An oaken stick with gnarly stuff on " +
"the business end. Good for hitting things with.",
damage="1d4+4"
)
weapon_init(name="stick2.4",
short_name="Battle Stick",
long_name="The stick of the professional peasant.",
damage="1d6"
)
weapon_init(name="sword1.1",
short_name="Shortest Sword",
long_name="The shortest of short swords.",
damage="1d4"
)
weapon_init(name="sword1.2",
short_name="Short Sword",
long_name="The second shortest of short swords.",
damage="1d6"
)
weapon_init(name="sword1.3",
short_name="Needle Sword",
long_name="A sword with a point so fine it will " +
"pierce you to the heart.",
damage="1d4+2"
)
weapon_init(name="sword1.4",
short_name="Meat Cleaver",
long_name="Not exactly a skillful weapon, but it does " +
"the job. The job of a lunatic.",
damage="1d6"
)
weapon_init(name="sword1.5",
short_name="Ice Shortsword",
long_name="It's short, blue and icy.",
damage="1d6+1"
)
weapon_init(name="sword1.6",
short_name="Platinum Shortsword",
long_name="Of short swords, this is one of the best. " +
"Durable, short and shiny.",
damage="1d8"
)
weapon_init(name="sword1.7",
short_name="Flaming Shortsword",
long_name="Mesmerizing blade. Flame ripples along its edges.",
damage="1d6+2"
)
weapon_init(name="sword1.8",
short_name="Gladius",
long_name="Wide bladed Roman style sword. " +
"Great for leaving big wounds.",
damage="1d4+3"
)
weapon_init(name="sword1.9",
short_name="Dao",
long_name="Wicked curved blade.",
damage="1d6+2"
)
weapon_init(name="sword1.10",
short_name="Khopesh",
long_name="The oriental blade of the professional.",
damage="1d6+4"
)
weapon_init(name="sword1.11",
short_name="Long Sword",
long_name="It's long. And a sword.",
damage="1d8",
is_double_handed=True
)
weapon_init(name="sword1.12",
short_name="Claymore",
long_name="The sword of the Highlander. This sword " +
"will give you your freedom. Or someone elses.",
damage="1d8+2",
is_double_handed=True
)
weapon_init(name="sword1.13",
short_name="Greatsword",
long_name="It's a sword and it's great.",
damage="1d10",
is_double_handed=True
)
weapon_init(name="sword1.14",
short_name="Masterwork Greatsword",
long_name="Don't mess around. Get this great sword.",
damage="1d10+2",
is_double_handed=True
)
weapon_init(name="sword1.15",
short_name="Platinum Greatsword",
long_name="They don't come much tougher than this.",
damage="1d12+5",
is_double_handed=True
)
weapon_init(name="sword1.16",
short_name="Flaiming Greatsword",
long_name="Dismember and cook your enemies.",
damage="1d10+3",
is_double_handed=True
)
weapon_init(name="sword1.17",
short_name="Serrated Sword",
long_name="Slice and dice with greatness.",
damage="1d6+4"
)
weapon_init(name="sword1.18",
short_name="Ulfbehrt",
long_name="Quality hybrid of Viking and Knightly sword",
damage="1d8+2",
is_double_handed=True
)
weapon_init(name="sword1.19",
short_name="Khanda",
long_name="Double edged straight sword",
damage="1d10"
)
weapon_init(name="sword1.20",
short_name="Ice Sword",
long_name="Ice ice sword.",
damage="1d10+3"
)
weapon_init(name="sword1.22",
short_name="Zweihander",
long_name="Massive two handed ultra great sword.",
damage="1d12+6",
is_double_handed=True
)
weapon_init(name="sword_wooden1.1",
short_name="Wooden Sword aka Stick",
long_name="It's a stick",
damage="1d4"
)
weapon_init(name="warhammer1.1",
short_name="Maul",
long_name="Long handled warhammer with metal head",
damage="1d8"
)
weapon_init(name="warhammer1.2",
short_name="Warhammer",
long_name="It's a hammer. For war.",
damage="1d10+1",
is_double_handed=True
)
weapon_init(name="warhammer1.3",
short_name="Masterwork Warhammer",
long_name="A war hammer of distinction.",
damage="1d12+2",
is_double_handed=True
)
init()
|
lgpl-3.0
| -4,042,375,541,105,230,000
| 36.643059
| 79
| 0.45748
| false
| 3.79332
| false
| false
| false
|
DreamerBear/awesome-py3-webapp
|
www/core/common/apis.py
|
1
|
2526
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2017/10/18 16:34
# @Author : xxc727xxc (xxc727xxc@foxmail.com)
# @Version : 1.0.0
'''
JSON API definition
'''
class Page(object):
'''
Page object for display pages.
'''
def __init__(self, item_count, page_index=1, page_size=10):
'''
Init Pagination by item_count, page_index and page_size.
>>> p1 = Page(100, 1)
>>> p1.page_count
10
>>> p1.offset
0
>>> p1.limit
10
>>> p2 = Page(90, 9, 10)
>>> p2.page_count
9
>>> p2.offset
80
>>> p2.limit
10
>>> p3 = Page(91, 10, 10)
>>> p3.page_count
10
>>> p3.offset
90
>>> p3.limit
10
'''
self.item_count = item_count
self.page_size = page_size
self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0)
if (item_count == 0) or (page_index > self.page_count):
self.offset = 0
self.limit = 0
self.page_index = 1
else:
self.page_index = page_index
self.offset = self.page_size * (page_index - 1)
self.limit = self.page_size
self.has_next = self.page_index < self.page_count
self.has_previous = self.page_index > 1
def __str__(self):
return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (
self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)
__repr__ = __str__
class APIError(Exception):
def __init__(self, error, data='', message=''):
super().__init__(message)
self.error = error
self.data = data
self.message = message
class APIValueError(APIError):
'''
Indicate the input value has error or invalid. The data specifies the error field of input form.
'''
def __init__(self, field, message=''):
super().__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
'''
Indicate the resource was not found. The data specifies the resource name.
'''
def __init__(self, field, message=''):
super().__init__('value:notfound', field, message)
class APIPermissionError(APIError):
'''
Indicate the api has no permission.
'''
def __init__(self, message=''):
super().__init__('permission:forbidden', 'permission', message)
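# Minimal usage sketch for Page and the APIError hierarchy defined above; the
# concrete numbers and the 'email' field are illustrative assumptions.
if __name__ == '__main__':
    p = Page(item_count=101, page_index=2, page_size=10)
    # 101 items at 10 per page -> 11 pages; page 2 starts at offset 10
    assert p.page_count == 11 and p.offset == 10 and p.limit == 10
    assert p.has_previous and p.has_next
    try:
        raise APIValueError('email', 'email cannot be empty.')
    except APIError as e:
        print(e.error, e.data, e.message)  # value:invalid email email cannot be empty.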
|
gpl-3.0
| -5,158,730,437,111,365,000
| 25.589474
| 105
| 0.538797
| false
| 3.547753
| false
| false
| false
|
trbarrettjr/research-apps
|
projected.py
|
1
|
1091
|
#!/usr/bin/env python
import re
import sys
def getData(data):
name = re.search(r'\w\s\w\s[\w]+|\w\s{3}\w+', data)
employee = name.group()
# Headers are as follows below:
# Event, Date, Time, Train, OD Date, OD Time
tuples = re.findall(r'(\w+|\w+\s\w+|\w+\s\w+\s\w+|[\w-]+)\s+(\d+)\s(\d+)\s(\w+)\s(\d+)\s(\d+)', data)
#print employee
#print tuples
outFileName = employee + '.csv'
employeeName = employee + '\n'
header = 'Event Type,Date Checked,Time Checked,Train Projected,Date Projected,Time Projected\n'
outfile = open(outFileName, 'w')
outfile.write(employeeName)
outfile.write(header)
for projected in tuples:
(type, checkDate, checkTime, train, odDate, odTime) = projected
outdata = type + ",'" + checkDate + ",'" + checkTime + "," + train + ",'" + odDate + ",'" + odTime + "\n"
outfile.write(outdata)
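# Worked example of the tuple regex in getData() above, on a hypothetical input line
# (the event/train values are made up for illustration):
#
#   >>> re.findall(r'(\w+|\w+\s\w+|\w+\s\w+\s\w+|[\w-]+)\s+(\d+)\s(\d+)\s(\w+)\s(\d+)\s(\d+)',
#   ...            'ON DUTY 0102 0830 Z123 0102 1200')
#   [('ON DUTY', '0102', '0830', 'Z123', '0102', '1200')]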
def main():
if len(sys.argv) >= 2:
filename = sys.argv[1]
else:
print 'Missing: filename'
sys.exit(1)
f = open(filename, 'r')
text = f.read()
f.close()
getData(text)
if __name__ == '__main__':
main()
|
cc0-1.0
| 3,289,933,708,136,195,600
| 23.244444
| 109
| 0.594867
| false
| 2.878628
| false
| false
| false
|
GNOME/chronojump-server
|
chronojumpserver/views.py
|
1
|
9513
|
# -*- coding: utf-8 -*-
"""Chronojump Server views controller."""
from chronojumpserver import app
from flask import render_template, request, redirect, url_for, abort, flash
from urlparse import urlparse, urljoin
from flask_wtf.file import FileField
from chronojumpserver.models import Person, Station, RFIDHistory, User, Group, GroupCoach, GroupPerson, Coach
from chronojumpserver.forms import PersonForm, LoginForm
from flask_login import login_required, login_user, logout_user, current_user
from chronojumpserver.database import db_session
import os
from time import time
def is_safe_url(target):
"""
    Snippet to check if the URL is safe, especially when coming
    from the login action.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
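# Behaviour sketch for is_safe_url() above, assuming the app is served from
# http://example.org (a hypothetical host, for illustration only):
#
#   is_safe_url('/player_list') -> True (relative path, same netloc)
#   is_safe_url('http://example.org/results') -> True
#   is_safe_url('http://evil.example.com/') -> False (different netloc)
#   is_safe_url('javascript:alert(1)') -> False (scheme is not http/https)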
@app.route('/')
@login_required
def index():
"""Chronojump Server Home page."""
return render_template('index.html')
@app.route('/home')
def airport():
"""Airport mode."""
#stations = [ station.serialize for station in Station.query.filter(Station.type != 'S')]
stations = [ station.serialize for station in Station.query.all()]
players = [ player.serialize for player in Person.query.all()]
return render_template('airport.html', stations=stations, players=players)
@app.route('/results')
@login_required
def show_results():
user_id = current_user.id
coach = Coach.query.filter(Coach.user_id == user_id).first()
groups = []
groups_by_coach = [ g.id for g in GroupCoach.query.filter(GroupCoach.coach_id == coach.id)]
for g in Group.query.filter(Group.id.in_(groups_by_coach)):
groups.append({
'id': g.id,
'name': g.name.decode('utf-8')
})
return render_template('results.html', groups=groups, coach_id=coach.id, org_id=2)
@app.route('/sprints')
@login_required
def show_sprints():
"""Show sprints view."""
return render_template('sprints.html')
@app.route('/player_list')
def show_players():
"""Show players view."""
stations = []
    for station in Station.query.filter(Station.exercises.isnot(None)):
stations.append({
'id': station.id,
'name': station.name.decode('utf-8'),
'type': station.type
})
return render_template('player_list.html', stations=stations)
@app.route('/stations')
@login_required
def show_stations():
"""Show Stations and Exercises."""
stations = []
for station in Station.query.all():
stations.append({
'id': station.id,
'name': station.name.decode('utf-8'),
'type': station.type
})
return render_template('station_list.html', stations=stations)
def _update_player_photo(player_id, photo, previous_imageName):
"""Update the photo of the player, and return the path."""
# First remove the previous photo
if previous_imageName:
previous_path = os.path.join('chronojumpserver',
app.config['UPLOAD_FOLDER'],
previous_imageName)
# Remove if exists
if os.path.exists(previous_path):
os.unlink(previous_path)
# Set the new photo filename
new_photo = 'player_' + str(player_id) + '_' + str(int(time()))
full_path = os.path.join('chronojumpserver',
app.config['UPLOAD_FOLDER'],
new_photo)
# save the photo in the disk
photo.save(full_path)
# Update the photo in the database
db_session.query(Person).filter_by(id=player_id).update({
"imageName": new_photo
})
# Commit the changes
db_session.commit()
@app.route('/player/<player_id>', methods=['GET', 'POST'])
@login_required
def player_detail(player_id):
"""Show players detail."""
has_errors = False
msg = None
# Get the player id passed by argument
player = Person.query.filter(Person.id == player_id).first()
form = PersonForm()
if request.method == "GET":
form.fullname.data = player.name.decode('utf-8')
form.height.data = player.height
form.weight.data = player.weight
form.rfid.data = player.rfid
elif request.method == "POST":
# Save the image in photos folder
if form.validate_on_submit():
"""Form Valid. Update the player."""
# Update the player
db_session.query(Person).filter_by(id=player_id).update({
"name": form.fullname.data,
"height": form.height.data,
"weight": form.weight.data,
"rfid": form.rfid.data
})
# Commit the changes
db_session.commit()
# If a new photo has passed, update it too
# Check if a photo has been passed too
if form.photo.data:
_update_player_photo(player_id, form.photo.data, player.imageName)
# If rfid is new, add the new rfid into history table
r = RFIDHistory.query.filter(RFIDHistory.rfid == form.rfid.data).first()
if not r:
# Add this new rfid into rfidHistory table
r = RFIDHistory(rfid=form.rfid.data,
person_id=player_id)
db_session.add(r)
db_session.commit()
# Update done
msg = "Les dades del jugador %s s'han guardat correctament." % form.fullname.data
else:
# There are some errors in the form
msg = 'Hi han hagut errors, revisa el formulari.'
has_errors = True
form.photo.data = player.imageName
return render_template('player_detail.html', form=form, msg=msg,
has_errors=has_errors)
@app.route('/player/add', methods=['GET', 'POST'])
@login_required
def add_player():
"""Show form to add a new player."""
has_errors = False
msg = None
form = PersonForm()
if request.method == "POST":
if form.validate_on_submit():
"""Form is valid, add the new player."""
player = Person(
name=form.fullname.data,
height=form.height.data,
weight=form.weight.data,
rfid=form.rfid.data
)
db_session.add(player)
# Commit the changes
db_session.commit()
# If a photo has given, update after person creation
if form.photo.data:
_update_player_photo(player.id, form.photo.data, None)
# Add the rfid into rfidHistory table
r = RFIDHistory(rfid=form.rfid.data,
person_id=player.id)
db_session.add(r)
db_session.commit()
msg = "Ej jugador %s s'ha creat correctament." % (form.fullname.data,)
return redirect('/player_list')
else:
# There are some errors in the form
msg = 'Hi han hagut errors, revisa el formulari.'
has_errors = True
else:
"""To remove None default values in the form."""
form.fullname.data = ""
form.rfid.data = ""
return render_template('player_detail.html', form=form, msg=msg,
has_errors=has_errors)
@app.route('/login', methods=['GET', 'POST'])
def login():
# Here we use a class of some kind to represent and validate our
# client-side form data. For example, WTForms is a library that will
# handle this for us, and we use a custom LoginForm to validate.
form = LoginForm()
if request.method == "GET":
form.organization.data = ""
form.coach.data = ""
form.password.data = ""
if form.validate_on_submit():
import md5
username = form.coach.data
password = md5.md5(form.password.data).hexdigest()
print password
user = User.query.filter(User.username == username).first()
if user:
print "DEBUG: User %s found" % user.username
print user.password
if password == user.password:
print "DEBUG: Passwords match. Allow login"
# Login and validate the user.
# user should be an instance of your `User` class
login_user(user)
flash('Logged in successfully.')
next = request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
if not is_safe_url(next):
return abort(400)
return redirect(next or url_for('index'))
else:
# Invalid password
error_msg = u"Contrasenya invàlida"
return render_template('login.html', form=form, error_msg=error_msg)
else:
# Invalid user
error_msg = u"El usuari %s no existeix!" % username
return render_template('login.html', form=form, error_msg=error_msg)
return render_template('login.html', form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
# Networks changes
@app.route("/group-players")
@login_required
def show_groups_and_players():
return render_template('groups_and_players.html')
|
agpl-3.0
| -1,233,406,085,584,981,500
| 33.32491
| 109
| 0.589924
| false
| 3.850952
| false
| false
| false
|
f41c0r/Cipher-Frequency-Analyzer
|
lettercounter.py
|
1
|
2928
|
#!/usr/bin/env python2
#computes letter and first-letter frequency statistics for a ciphertext read from a plaintext file
import sys
import re
myFile = open(sys.argv[1],"r")
line = myFile.readline()
myDict = {}
myDict2 = {}
myDict3 = {}
totalLength = 0
freqDict = {"a" : 8.167, "b" : 1.49, "c" : 2.78, "d" : 4.253, "e" : 12.702, "f" : 2.228, "g" : 2.015, "h" : 6.094, "i" : 6.966, "j" : 0.153, "k" : 0.772, "l" : 4.025, "m" : 2.406, "n" : 6.749, "o" : 7.507, "p" : 1.929, "q" : 0.095, "r" : 5.987, "s" : 6.327, "t" : 9.056, "u" : 2.758, "v" : 0.978, "w" : 2.360, "x" : 0.150, "y" : 1.974, "z" : 0.07}
firstletterlength = 0
while line:
for i, letter in enumerate(line):
#comment out the next time if the " " is also encrypted
        if letter != " ":
if i == 0:
myDict3[letter] = 1
firstletterlength +=1
if i !=0 and line[i-1] == " ":
if letter in myDict3:
myDict3[letter] += 1
firstletterlength +=1
else:
myDict3[letter] = 1
firstletterlength +=1
if letter in myDict:
myDict[letter] += 1
else:
myDict[letter] = 1
# comment out if " " is also encrypted
totalLength += len(re.sub(" ","",line))
# comment out if " " is NOT encrypted
# totalLength += len(line)
line = myFile.readline()
myFile.close()
print
print "PERCENTAGES OF FIRST LETTERS"
print
for k in sorted(myDict3, key=lambda k: myDict3[k], reverse=True):
print (k,':', str(100 * float(float(myDict3[k]) / float(firstletterlength))))
#
print
print "TOTAL FREQUENCIES PER LETTER IN CIPHERTEXT:"
print
for letter in myDict:
print (letter,':',str(myDict[letter]))
print
print "FREQUENCIES IN CIPHERTEXT IN ALPHABETICAL ORDER WITH PERCENTAGES"
print
for letter in myDict:
myDict2[letter] = float(float(myDict[letter]) / float(totalLength)) * 100
print (letter,':',str(myDict2[letter]))
reverseFreqDict = {}
listFreq = []
print
print "FREQUENCES IN THE ENGLISH LANGUAGE IN ALPHABETICAL ORDER WITH PERCENTAGES"
print
for letter in freqDict:
print (letter, ":",str(freqDict[letter]))
reverseFreqDict[freqDict[letter]] = letter
listFreq.append(freqDict[letter])
print
print "LETTERS IN THE ENGLISH LANGUAGE IN ORDER OF FREQUENCY:"
print
listFreq = sorted(listFreq,reverse=True)
for number in listFreq:
print(reverseFreqDict[number], ":", str(number))
print
print "LETTERS IN CIPHERTEXT IN ORDER OF FREQUENCY"
print
reverseFreqDict = {}
listFreq = []
for letter in myDict2:
if myDict2[letter] not in reverseFreqDict:
reverseFreqDict[myDict2[letter]] = letter
else:
reverseFreqDict[myDict2[letter]] = reverseFreqDict[myDict2[letter]] + "," + letter
listFreq.append(myDict2[letter])
listFreq = sorted(listFreq,reverse=True)
for number in listFreq:
print(reverseFreqDict[number], ":", str(number))
|
gpl-3.0
| -4,992,788,253,731,573,000
| 30.483871
| 347
| 0.614754
| false
| 3.015448
| false
| false
| false
|
mhrivnak/crane
|
tests/test_app.py
|
2
|
3074
|
import logging
from flask import Flask
import mock
import unittest2
from crane import app, config, app_util, exceptions, search
from crane.search import GSA
from crane.views import v1
from . import demo_data
@mock.patch('os.environ.get', spec_set=True, return_value=demo_data.demo_config_path)
class TestCreateApp(unittest2.TestCase):
def setUp(self):
super(TestCreateApp, self).setUp()
with mock.patch('crane.app.init_logging') as mock_init_logging:
self.app = app.create_app()
# hold this so one of the tests can inspect it
self.mock_init_logging = mock_init_logging
def test_returns_app(self, mock_environ_get):
self.assertIsInstance(self.app, Flask)
def test_loads_config(self, mock_environ_get):
self.assertTrue(config.KEY_DATA_DIR in self.app.config)
def test_blueprints_loaded(self, mock_environ_get):
self.assertTrue(v1.section.name in self.app.blueprints)
def test_handlers_added(self, mock_environ_get):
handlers = self.app.error_handler_spec[None][None]
self.assertEquals(handlers[0], (exceptions.HTTPError,
app_util.http_error_handler))
def test_calls_init_logging(self, mock_environ_get):
self.mock_init_logging.assert_called_once_with()
def test_calls_search(self, mock_environ_get):
# reset to the default state
search.backend = search.SearchBackend()
# run the "create_app", which because of the mock_environ_get, will load
# our demo config. That config has GSA info.
with mock.patch('crane.app.init_logging'):
app.create_app()
# this will only be true if the search config was parsed
self.assertIsInstance(search.backend, GSA)
@mock.patch('logging.Logger.addHandler', spec_set=True)
class TestInitLogging(unittest2.TestCase):
def test_adds_handler(self, mock_add_handler):
app.create_app()
# make sure it was called
self.assertEqual(mock_add_handler.call_count, 1)
# make sure the first argument is the right type
self.assertIsInstance(mock_add_handler.call_args[0][0], logging.Handler)
# make sure the first argument was the only argument
mock_add_handler.assert_called_once_with(mock_add_handler.call_args[0][0])
@mock.patch('logging.Logger.setLevel', spec_set=True)
class TestSetLogLevel(unittest2.TestCase):
def setUp(self):
super(TestSetLogLevel, self).setUp()
with mock.patch('crane.app.init_logging') as mock_init_logging:
self.app = app.create_app()
def test_debug(self, mock_set_level):
self.app.config['DEBUG'] = True
app.set_log_level(self.app)
# make sure it set the level to debug
mock_set_level.assert_called_once_with(logging.DEBUG)
def test_not_debug(self, mock_set_level):
self.app.config['DEBUG'] = False
app.set_log_level(self.app)
# make sure it did not change the log level
self.assertEqual(mock_set_level.call_count, 0)
|
gpl-2.0
| -1,201,378,466,537,246,000
| 35.164706
| 85
| 0.670787
| false
| 3.607981
| true
| false
| false
|
guydavis/lane-detect
|
older/lane_detect.v2.py
|
1
|
6587
|
#
# Attempting to replicate lane detection results described in this tutorial by Naoki Shibuya:
# https://medium.com/towards-data-science/finding-lane-lines-on-the-road-30cf016a1165
# For more see: https://github.com/naokishibuya/car-finding-lane-lines
#
# This 2nd version does a much better job of processing images.
#
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import sys
import subprocess
import os
import shutil
def convert_hls(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
def select_white_yellow(image):
converted = convert_hls(image)
lower = np.uint8([ 0, 200, 0])
upper = np.uint8([255, 255, 255])
white_mask = cv2.inRange(converted, lower, upper)
lower = np.uint8([ 10, 0, 100])
upper = np.uint8([ 40, 255, 255])
yellow_mask = cv2.inRange(converted, lower, upper)
mask = cv2.bitwise_or(white_mask, yellow_mask)
return cv2.bitwise_and(image, image, mask = mask)
def convert_gray_scale(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
def apply_smoothing(image, kernel_size=15):
return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
def detect_edges(image, low_threshold=50, high_threshold=150):
return cv2.Canny(image, low_threshold, high_threshold)
def filter_region(image, vertices):
mask = np.zeros_like(image)
if len(mask.shape)==2:
cv2.fillPoly(mask, vertices, 255)
else:
cv2.fillPoly(mask, vertices, (255,)*mask.shape[2])
return cv2.bitwise_and(image, mask)
def select_region(image):
rows, cols = image.shape[:2]
bottom_left = [cols*0.1, rows*0.95]
top_left = [cols*0.4, rows*0.6]
bottom_right = [cols*0.9, rows*0.95]
top_right = [cols*0.6, rows*0.6]
vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
return filter_region(image, vertices)
def hough_lines(image):
return cv2.HoughLinesP(image, rho=1, theta=np.pi/180, threshold=20, minLineLength=20, maxLineGap=300)
def average_slope_intercept(lines):
left_lines = []
left_weights = []
right_lines = []
right_weights = []
for line in lines:
for x1, y1, x2, y2 in line:
if x2==x1:
continue
slope = (y2-y1)/(x2-x1)
intercept = y1 - slope*x1
length = np.sqrt((y2-y1)**2+(x2-x1)**2)
if slope < 0:
left_lines.append((slope, intercept))
left_weights.append((length))
else:
right_lines.append((slope, intercept))
right_weights.append((length))
left_lane = np.dot(left_weights, left_lines) /np.sum(left_weights) if len(left_weights) >0 else None
right_lane = np.dot(right_weights, right_lines)/np.sum(right_weights) if len(right_weights)>0 else None
return left_lane, right_lane
def make_line_points(y1, y2, line):
if line is None:
return None
slope, intercept = line
x1 = int((y1 - intercept)/slope)
x2 = int((y2 - intercept)/slope)
y1 = int(y1)
y2 = int(y2)
return ((x1, y1), (x2, y2))
def lane_lines(image, lines):
left_lane, right_lane = average_slope_intercept(lines)
y1 = image.shape[0]
y2 = y1*0.6
left_line = make_line_points(y1, y2, left_lane)
right_line = make_line_points(y1, y2, right_lane)
return left_line, right_line
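# Worked example for average_slope_intercept() and make_line_points() above, using a
# hypothetical Hough output with one segment per side (so the length-weighted average
# reduces to the single segment), on an image 540 pixels high:
#
#   lines = [[[100, 540, 300, 380]], [[700, 380, 900, 540]]]
#   left segment:  slope = (380-540)/(300-100) = -0.8, intercept = 540 - (-0.8)*100 = 620
#   right segment: slope = (540-380)/(900-700) =  0.8, intercept = 380 - 0.8*700   = -180
#   -> left_lane = (-0.8, 620.0), right_lane = (0.8, -180.0)
#   make_line_points(540, 540*0.6, left_lane) -> ((100, 540), (370, 324))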
def draw_lane_lines(image, lines, color=[255, 0, 0], thickness=20):
line_image = np.zeros_like(image)
for line in lines:
if line is not None:
cv2.line(line_image, *line, color, thickness)
return cv2.addWeighted(image, 1.0, line_image, 0.95, 0.0)
def mark_failed(image):
font = cv2.FONT_HERSHEY_SIMPLEX
text = "DETECT FAILED!"
textsize = cv2.getTextSize(text, font, 2, 5)[0]
textX = int((image.shape[1] - textsize[0]) / 2)
textY = int((image.shape[0] + textsize[1]) / 2)
cv2.putText(image, text, (textX, textY), font, 2, (255, 0, 0), 5)
return image
def process_image(dirpath, image_file):
if not os.path.exists('tmp'):
os.mkdir('tmp')
if not os.path.exists('output'):
os.makedirs('output')
image_name = os.path.splitext(image_file)[0]
# First load and show the sample image
image = mpimg.imread("{0}/{1}".format(dirpath, image_file))
im = plt.imshow(image)
plt.savefig('tmp/1.png')
# Now select the white and yellow lines
white_yellow = select_white_yellow(image)
im = plt.imshow(white_yellow, cmap='gray')
plt.savefig('tmp/2.png')
# Now convert to grayscale
gray_scale = convert_gray_scale(white_yellow)
im = plt.imshow(gray_scale, cmap='gray')
plt.savefig('tmp/3.png')
# Then apply a Gaussian blur
blurred_image = apply_smoothing(gray_scale)
im = plt.imshow(blurred_image, cmap='gray')
plt.savefig('tmp/4.png')
# Detect line edges
edged_image = detect_edges(blurred_image)
im = plt.imshow(edged_image, cmap='gray')
plt.savefig('tmp/5.png')
# Now ignore all but the area of interest
masked_image = select_region(edged_image)
im = plt.imshow(masked_image, cmap='gray')
plt.savefig('tmp/6.png')
# Apply Houghed lines algorithm
houghed_lines = hough_lines(masked_image)
if houghed_lines is not None:
houghed_image = draw_lane_lines(image, lane_lines(image, houghed_lines))
im = plt.imshow(houghed_image, cmap='gray')
output_name = "output/{0}_passed.gif".format(image_name)
print("Detected lanes in '{0}/{1}'. See result in '{2}'.".format(dirpath, image_file, output_name))
else:
im = plt.imshow(mark_failed(image), cmap='gray')
output_name = "output/{0}_failed.gif".format(image_name)
print("Failed detection in '{0}/{1}'. See result in '{2}'.".format(dirpath, image_file, output_name))
plt.savefig('tmp/7.png')
# Repeat last image in the loop a couple of times.
plt.savefig('tmp/8.png')
plt.savefig('tmp/9.png')
# Now generate an animated gif of the image stages
subprocess.call( ['convert', '-delay', '100', '-loop', '0', 'tmp/*.png', output_name] )
shutil.rmtree('tmp')
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Usage: python3 ./lane_detect.py images/*")
else:
for arg in sys.argv[1:]:
if not os.path.isfile(arg):
print("Not a file: {0}".format(arg))
else:
dirpath,filename = os.path.split(arg)
process_image(dirpath, filename)
|
mit
| 2,390,023,151,074,185,000
| 34.610811
| 109
| 0.627752
| false
| 3.082358
| false
| false
| false
|
hrantzsch/signature-verification
|
tools/mkdata_background.py
|
1
|
3585
|
"""
This script is used to create a training database based on GPDSSynth signatures.
Images are scaled to 192x96;
Paper-like backgrounds are added
"""
import argparse
import numpy as np
from scipy.misc import imread, imresize, imshow, imsave
from PIL import Image
import os
from skimage.transform import rotate
import time
import prepimage
def load_backgrounds(folder):
"""read image file and convert to grayscale"""
return [imresize(
np.dot(imread(os.path.join(folder, bg_file))[..., :3],
               [0.299, 0.587, 0.114]),
0.5)
for bg_file in os.listdir(folder)
if '.jpg' in bg_file or '.png' in bg_file]
def get_background(img, size):
"""crop a random piece of desired size from the given image"""
y = np.random.randint(0, img.shape[0]-size[0])
x = np.random.randint(0, img.shape[1]-size[1])
return imresize(img[y:y+size[0], x:x+size[1]], (size[0], size[1]))
def get_signatures(data_dir, no_forgeries=False):
for (path, _, files) in os.walk(data_dir):
for f in files:
if '.png' in f and not (no_forgeries and 'cf' in f):
yield os.path.join(path, f)
def get_signatures_(data_dir, no_forgeries=False):
for f in os.listdir(data_dir):
if '.png' in f and not (no_forgeries and 'cf' in f):
yield os.path.join(data_dir, f)
def get_roi(image, pad=20):
roix, roiy = prepimage.min_max(prepimage.binarize(image))
roix = (max(0, roix[0] - pad), min(roix[1] + pad, image.shape[1]))
roiy = (max(0, roiy[0] - pad), min(roiy[1] + pad, image.shape[0]))
return roiy, roix
def process_signature(sig_path):
sig = imread(sig_path).astype(np.float32) / 255.0
sig = rotate(sig, np.random.randint(-25, 25), cval=1.0, resize=True)
roiy, roix = get_roi(sig)
shape = (roiy[1] - roiy[0], roix[1] - roix[0])
bg = get_background(np.random.choice(backgrounds), shape).astype(np.float32) / 255.0
img = bg + sig[roiy[0]:roiy[1], roix[0]:roix[1]]
img = imresize(img, target_size, mode='L').astype(np.float32)
img *= 1.0/img.max()
# return np.minimum(img, 1.0)
return img
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('signatures',
help='Path to extracted GPDS data')
parser.add_argument('backgrounds',
help='Path to background files (jpg or png)')
parser.add_argument('--out', '-o', default='images',
help='Path to save output images')
parser.add_argument('--start', '-s', default=1, type=int,
help='User to start with (for resumes)')
args = parser.parse_args()
target_size = (384, 768)
# signatures = list(get_signatures(args.signatures))
backgrounds = load_backgrounds(args.backgrounds)
print("Loaded {} backgrounds".format(len(backgrounds)))
for user in range(args.start, 20):
user_str = "{}".format(user)
print("processing user " + user_str)
os.makedirs(os.path.join(args.out, user_str), exist_ok=True)
count = 0
start = time.clock()
for sig in get_signatures_(os.path.join(args.signatures, user_str)):
fname, _ = os.path.splitext(os.path.basename(sig))
for i in range(1, 21):
outname = os.path.join(args.out, user_str, "{}-{:02d}.png".format(fname, i))
imsave(outname, process_signature(sig), 'png')
count += 1
print("{} images in {:3f} sec".format(count, time.clock() - start))
|
gpl-3.0
| 3,424,798,210,652,326,400
| 35.212121
| 92
| 0.600558
| false
| 3.189502
| false
| false
| false
|
crDDI/dbgap
|
tests/test_file_downloader.py
|
1
|
2787
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import unittest
import shutil
from dbgap.file_downloader import FileDownloader
single_file_template = 'dbgap/studies/%(study)s/%(fullname)s/GapExchange_%(fullname)s.xml'
directory_template = '/dbgap/studies/%(study)s/%(fullname)s/pheno_variable_summaries'
class FileDownloaderTestCase(unittest.TestCase):
def test_dowload_single_file(self):
study = 'phs001007'
fullname = study + ".v1.p1"
dld = FileDownloader('ftp.ncbi.nlm.nih.gov')
self.assertEqual(open(os.path.join('data', 'phs001007.xml')).read(),
dld.download_file(single_file_template % dict(study=study, fullname=fullname)))
def test_dir_download(self):
test_dir = os.path.join('data', 'dltest')
shutil.rmtree(test_dir, ignore_errors=True)
os.makedirs(test_dir)
study = 'phs000722'
fullname = study + ".v1.p1"
dld = FileDownloader('ftp.ncbi.nlm.nih.gov')
self.assertEqual(4, dld.download_dir(directory_template % dict(study=study, fullname=fullname), test_dir,
name_map=lambda s: s.replace('.xml', '.tst'), file_filtr=lambda s: 'data_dict' in s))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 7,975,316,140,981,146,000
| 47.051724
| 113
| 0.717259
| false
| 3.892458
| true
| false
| false
|
perclasson/trail
|
trail/main.py
|
1
|
4059
|
"""Implements a wrapper script for executing a Python program from the
command line.
The wrapper script works by adding a special directory into the
'PYTHONPATH' environment variable, describing additional Python module
search directories, which contains a custom 'sitecustomize' module. When
the Python interpreter is started that custom 'sitecustomize' module
will be automatically loaded. This allows the custom 'sitecustomize'
file to then load any original 'sitecustomize' file which may have been
hidden and then bootstrap the registration of the post import hook
callback functions."""
import sys
import os
import time
_debug = os.environ.get(
'TRAIL_DEBUG', 'off').lower() in ('on', 'true', '1')
def log_message(text, *args):
if _debug:
text = text % args
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print('TRAIL: %s (%d) - %s' % (timestamp, os.getpid(), text))
def run_program(args):
log_message('trail - wrapper (%s)', __file__)
log_message('working_directory = %r', os.getcwd())
log_message('current_command = %r', sys.argv)
log_message('sys.prefix = %r', os.path.normpath(sys.prefix))
try:
log_message('sys.real_prefix = %r', sys.real_prefix)
except AttributeError:
pass
log_message('sys.version_info = %r', sys.version_info)
log_message('sys.executable = %r', sys.executable)
log_message('sys.flags = %r', sys.flags)
log_message('sys.path = %r', sys.path)
# Determine the location of the special bootstrap directory. Add
# this into the 'PYTHONPATH' environment variable, preserving any
# existing value the 'PYTHONPATH' environment variable may have.
root_directory = os.path.dirname(__file__)
boot_directory = os.path.join(root_directory, '__startup__')
log_message('root_directory = %r', root_directory)
log_message('boot_directory = %r', boot_directory)
python_path = boot_directory
if 'PYTHONPATH' in os.environ:
path = os.environ['PYTHONPATH'].split(os.path.pathsep)
if boot_directory not in path:
python_path = "%s%s%s" % (
boot_directory,
os.path.pathsep,
os.environ['PYTHONPATH']
)
os.environ['PYTHONPATH'] = python_path
# Set special environment variables which record the location of the
# Python installation or virtual environment being used as well as
# the Python version. The values of these are compared in the
# 'sitecustomize' module with the values for the Python interpreter
# which is later executed by the wrapper. If they don't match then
# nothing will be done. This check is made as using the wrapper
# script from one Python installation around 'python' executing from
# a different installation can cause problems.
os.environ['TRAIL_PYTHON_PREFIX'] = os.path.realpath(
os.path.normpath(sys.prefix))
os.environ['TRAIL_PYTHON_VERSION'] = '.'.join(
map(str, sys.version_info[:2]))
# Now launch the wrapped program. If the program to run was not an
# absolute or relative path then we need to search the directories
# specified in the 'PATH' environment variable to try and work out
# where it is actually located.
program_exe_path = args[0]
if not os.path.dirname(program_exe_path):
program_search_path = os.environ.get(
'PATH', '').split(os.path.pathsep)
for path in program_search_path:
path = os.path.join(path, program_exe_path)
if os.path.exists(path) and os.access(path, os.X_OK):
program_exe_path = path
break
log_message('program_exe_path = %r', program_exe_path)
log_message('execl_arguments = %r', [program_exe_path]+args)
os.execl(program_exe_path, *args)
def main():
if len(sys.argv) <= 1:
sys.exit('Usage: %s program [options]' % os.path.basename(
sys.argv[0]))
run_program(sys.argv[1:])
if __name__ == '__main__':
main()
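# Sketch of the comparison the bundled 'sitecustomize' module is described as making
# (that module lives in the '__startup__' directory and is not shown here; the code
# below only illustrates the check against the environment variables set above):
#
#   import os, sys
#   same_prefix = os.environ.get('TRAIL_PYTHON_PREFIX') == os.path.realpath(
#       os.path.normpath(sys.prefix))
#   same_version = os.environ.get('TRAIL_PYTHON_VERSION') == '.'.join(
#       map(str, sys.version_info[:2]))
#   if same_prefix and same_version:
#       pass  # safe to load any hidden 'sitecustomize' and register post import hooks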
|
bsd-2-clause
| -460,797,675,587,299,260
| 34.605263
| 72
| 0.654348
| false
| 3.876791
| false
| false
| false
|
Einsteinish/PyTune3
|
apps/reader/models.py
|
1
|
71869
|
import datetime
import time
import re
import redis
from collections import defaultdict
from operator import itemgetter
from pprint import pprint
from utils import log as logging
from utils import json_functions as json
from django.db import models, IntegrityError
from django.db.models import Q, F
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.template.defaultfilters import slugify
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.reader.managers import UserSubscriptionManager
from apps.rss_feeds.models import Feed, MStory, DuplicateFeed
from apps.rss_feeds.tasks import NewFeeds
from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
from apps.analyzer.tfidf import tfidf
from utils.feed_functions import add_object_to_folder, chunks
class UserSubscription(models.Model):
"""
A feed which a user has subscribed to. Carries all of the cached information
about the subscription, including unread counts of the three primary scores.
Also has a dirty flag (needs_unread_recalc) which means that the unread counts
are not accurate and need to be calculated with `self.calculate_feed_scores()`.
"""
UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
user = models.ForeignKey(User, related_name='subscriptions')
feed = models.ForeignKey(Feed, related_name='subscribers')
user_title = models.CharField(max_length=255, null=True, blank=True)
active = models.BooleanField(default=False)
last_read_date = models.DateTimeField(default=UNREAD_CUTOFF)
mark_read_date = models.DateTimeField(default=UNREAD_CUTOFF)
unread_count_neutral = models.IntegerField(default=0)
unread_count_positive = models.IntegerField(default=0)
unread_count_negative = models.IntegerField(default=0)
unread_count_updated = models.DateTimeField(default=datetime.datetime.now)
oldest_unread_story_date = models.DateTimeField(default=datetime.datetime.now)
needs_unread_recalc = models.BooleanField(default=False)
feed_opens = models.IntegerField(default=0)
is_trained = models.BooleanField(default=False)
objects = UserSubscriptionManager()
def __unicode__(self):
return '[%s (%s): %s (%s)] ' % (self.user.username, self.user.pk,
self.feed.feed_title, self.feed.pk)
class Meta:
unique_together = ("user", "feed")
def canonical(self, full=False, include_favicon=True, classifiers=None):
feed = self.feed.canonical(full=full, include_favicon=include_favicon)
feed['feed_title'] = self.user_title or feed['feed_title']
feed['ps'] = self.unread_count_positive
feed['nt'] = self.unread_count_neutral
feed['ng'] = self.unread_count_negative
feed['active'] = self.active
feed['feed_opens'] = self.feed_opens
feed['subscribed'] = True
if classifiers:
feed['classifiers'] = classifiers
return feed
def save(self, *args, **kwargs):
user_title_max = self._meta.get_field('user_title').max_length
if self.user_title and len(self.user_title) > user_title_max:
self.user_title = self.user_title[:user_title_max]
try:
super(UserSubscription, self).save(*args, **kwargs)
except IntegrityError:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=self.feed_id)
for duplicate_feed in duplicate_feeds:
already_subscribed = UserSubscription.objects.filter(user=self.user, feed=duplicate_feed.feed)
if not already_subscribed:
self.feed = duplicate_feed.feed
super(UserSubscription, self).save(*args, **kwargs)
break
else:
if self: self.delete()
@classmethod
def subs_for_feeds(cls, user_id, feed_ids=None, read_filter="unread"):
usersubs = cls.objects
if read_filter == "unread":
usersubs = usersubs.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0))
if not feed_ids:
usersubs = usersubs.filter(user=user_id,
active=True).only('feed', 'mark_read_date', 'is_trained')
else:
usersubs = usersubs.filter(user=user_id,
active=True,
feed__in=feed_ids).only('feed', 'mark_read_date', 'is_trained')
return usersubs
@classmethod
def story_hashes(cls, user_id, feed_ids=None, usersubs=None, read_filter="unread", order="newest",
include_timestamps=False, group_by_feed=True, cutoff_date=None,
across_all_feeds=True):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
pipeline = r.pipeline()
story_hashes = {} if group_by_feed else []
if not feed_ids and not across_all_feeds:
return story_hashes
if not usersubs:
usersubs = cls.subs_for_feeds(user_id, feed_ids=feed_ids, read_filter=read_filter)
feed_ids = [sub.feed_id for sub in usersubs]
if not feed_ids:
return story_hashes
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
unread_timestamp = int(time.mktime(cutoff_date.timetuple()))-1000
feed_counter = 0
read_dates = dict()
for us in usersubs:
read_dates[us.feed_id] = int(max(us.mark_read_date, cutoff_date).strftime('%s'))
for feed_id_group in chunks(feed_ids, 20):
pipeline = r.pipeline()
for feed_id in feed_id_group:
stories_key = 'F:%s' % feed_id
sorted_stories_key = 'zF:%s' % feed_id
read_stories_key = 'RS:%s:%s' % (user_id, feed_id)
unread_stories_key = 'U:%s:%s' % (user_id, feed_id)
unread_ranked_stories_key = 'zU:%s:%s' % (user_id, feed_id)
expire_unread_stories_key = False
max_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
min_score = read_dates[feed_id] + 1
pipeline.sdiffstore(unread_stories_key, stories_key, read_stories_key)
expire_unread_stories_key = True
else:
min_score = 0
unread_stories_key = stories_key
if order == 'oldest':
byscorefunc = pipeline.zrangebyscore
else:
byscorefunc = pipeline.zrevrangebyscore
min_score, max_score = max_score, min_score
pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps)
pipeline.delete(unread_ranked_stories_key)
if expire_unread_stories_key:
pipeline.delete(unread_stories_key)
results = pipeline.execute()
for hashes in results:
if not isinstance(hashes, list): continue
if group_by_feed:
story_hashes[feed_ids[feed_counter]] = hashes
feed_counter += 1
else:
story_hashes.extend(hashes)
return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all', withscores=False,
hashes_only=False, cutoff_date=None, default_cutoff_date=None):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
ignore_user_stories = False
stories_key = 'F:%s' % (self.feed_id)
read_stories_key = 'RS:%s:%s' % (self.user_id, self.feed_id)
unread_stories_key = 'U:%s:%s' % (self.user_id, self.feed_id)
unread_ranked_stories_key = 'z%sU:%s:%s' % ('h' if hashes_only else '',
self.user_id, self.feed_id)
if withscores or not offset or not rt.exists(unread_ranked_stories_key):
rt.delete(unread_ranked_stories_key)
if not r.exists(stories_key):
# print " ---> No stories on feed: %s" % self
return []
elif read_filter == 'all' or not r.exists(read_stories_key):
ignore_user_stories = True
unread_stories_key = stories_key
else:
r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
sorted_stories_key = 'zF:%s' % (self.feed_id)
r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
if not ignore_user_stories:
r.delete(unread_stories_key)
dump = r.dump(unread_ranked_stories_key)
if dump:
pipeline = rt.pipeline()
pipeline.delete(unread_ranked_stories_key)
pipeline.restore(unread_ranked_stories_key, 1*60*60*1000, dump)
pipeline.execute()
r.delete(unread_ranked_stories_key)
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
if read_filter == "unread":
cutoff_date = max(cutoff_date, self.mark_read_date)
elif default_cutoff_date:
cutoff_date = default_cutoff_date
if order == 'oldest':
byscorefunc = rt.zrangebyscore
if read_filter == 'unread':
min_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
min_score = int(time.mktime(cutoff_date.timetuple())) - 1000
max_score = current_time
else:
byscorefunc = rt.zrevrangebyscore
min_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
max_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
max_score = 0
if settings.DEBUG and False:
debug_stories = rt.zrevrange(unread_ranked_stories_key, 0, -1, withscores=True)
print " ---> Unread all stories (%s - %s) %s stories: %s" % (
min_score,
max_score,
len(debug_stories),
debug_stories)
story_ids = byscorefunc(unread_ranked_stories_key, min_score,
max_score, start=offset, num=500,
withscores=withscores)[:limit]
if withscores:
story_ids = [(s[0], int(s[1])) for s in story_ids]
if withscores or hashes_only:
return story_ids
elif story_ids:
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
mstories = MStory.objects(story_hash__in=story_ids).order_by(story_date_order)
stories = Feed.format_stories(mstories)
return stories
else:
return []
@classmethod
def feed_stories(cls, user_id, feed_ids=None, offset=0, limit=6,
order='newest', read_filter='all', usersubs=None, cutoff_date=None,
all_feed_ids=None, cache_prefix=""):
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
across_all_feeds = False
if order == 'oldest':
range_func = rt.zrange
else:
range_func = rt.zrevrange
if feed_ids is None:
across_all_feeds = True
feed_ids = []
if not all_feed_ids:
all_feed_ids = [f for f in feed_ids]
# feeds_string = ""
feeds_string = ','.join(str(f) for f in sorted(all_feed_ids))[:30]
ranked_stories_keys = '%szU:%s:feeds:%s' % (cache_prefix, user_id, feeds_string)
unread_ranked_stories_keys = '%szhU:%s:feeds:%s' % (cache_prefix, user_id, feeds_string)
stories_cached = rt.exists(ranked_stories_keys)
unreads_cached = True if read_filter == "unread" else rt.exists(unread_ranked_stories_keys)
if offset and stories_cached and unreads_cached:
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_story_hashes = story_hashes
else:
unread_story_hashes = range_func(unread_ranked_stories_keys, 0, offset+limit)
return story_hashes, unread_story_hashes
else:
rt.delete(ranked_stories_keys)
rt.delete(unread_ranked_stories_keys)
story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter=read_filter, order=order,
include_timestamps=True,
group_by_feed=False,
usersubs=usersubs,
cutoff_date=cutoff_date,
across_all_feeds=across_all_feeds)
if not story_hashes:
return [], []
pipeline = rt.pipeline()
for story_hash_group in chunks(story_hashes, 100):
pipeline.zadd(ranked_stories_keys, **dict(story_hash_group))
pipeline.execute()
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_feed_story_hashes = story_hashes
rt.zunionstore(unread_ranked_stories_keys, [ranked_stories_keys])
else:
unread_story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter="unread", order=order,
include_timestamps=True,
group_by_feed=False,
cutoff_date=cutoff_date)
if unread_story_hashes:
for unread_story_hash_group in chunks(unread_story_hashes, 100):
rt.zadd(unread_ranked_stories_keys, **dict(unread_story_hash_group))
unread_feed_story_hashes = range_func(unread_ranked_stories_keys, offset, limit)
rt.expire(ranked_stories_keys, 60*60)
rt.expire(unread_ranked_stories_keys, 60*60)
return story_hashes, unread_feed_story_hashes
@classmethod
def add_subscription(cls, user, feed_address, folder=None, bookmarklet=False, auto_active=True,
skip_fetch=False):
feed = None
us = None
logging.user(user, "~FRAdding URL: ~SB%s (in %s) %s" % (feed_address, folder,
"~FCAUTO-ADD" if not auto_active else ""))
feed = Feed.get_feed_from_url(feed_address)
if not feed:
code = -1
if bookmarklet:
message = "This site does not have an RSS feed. Nothing is linked to from this page."
else:
message = "This address does not point to an RSS feed or a website with an RSS feed."
else:
us, subscription_created = cls.objects.get_or_create(
feed=feed,
user=user,
defaults={
'needs_unread_recalc': True,
'active': auto_active,
}
)
code = 1
message = ""
if us:
user_sub_folders_object, created = UserSubscriptionFolders.objects.get_or_create(
user=user,
defaults={'folders': '[]'}
)
if created:
user_sub_folders = []
else:
user_sub_folders = json.decode(user_sub_folders_object.folders)
user_sub_folders = add_object_to_folder(feed.pk, folder, user_sub_folders)
user_sub_folders_object.folders = json.encode(user_sub_folders)
user_sub_folders_object.save()
if auto_active or user.profile.is_premium:
us.active = True
us.save()
if not skip_fetch and feed.last_update < datetime.datetime.utcnow() - datetime.timedelta(days=1):
feed = feed.update()
from apps.social.models import MActivity
MActivity.new_feed_subscription(user_id=user.pk, feed_id=feed.pk, feed_title=feed.title)
feed.setup_feed_for_premium_subscribers()
return code, message, us
@classmethod
def feeds_with_updated_counts(cls, user, feed_ids=None, check_fetch_status=False, force=False):
feeds = {}
# Get subscriptions for user
user_subs = cls.objects.select_related('feed').filter(user=user, active=True)
feed_ids = [f for f in feed_ids if f and not f.startswith('river')]
if feed_ids:
user_subs = user_subs.filter(feed__in=feed_ids)
for i, sub in enumerate(user_subs):
# Count unreads if subscription is stale.
if (force or
sub.needs_unread_recalc or
sub.unread_count_updated < user.profile.unread_cutoff or
sub.oldest_unread_story_date < user.profile.unread_cutoff):
sub = sub.calculate_feed_scores(silent=True, force=force)
if not sub: continue # TODO: Figure out the correct sub and give it a new feed_id
feed_id = sub.feed_id
feeds[feed_id] = {
'ps': sub.unread_count_positive,
'nt': sub.unread_count_neutral,
'ng': sub.unread_count_negative,
'id': feed_id,
}
if not sub.feed.fetched_once or check_fetch_status:
feeds[feed_id]['fetched_once'] = sub.feed.fetched_once
feeds[feed_id]['not_yet_fetched'] = not sub.feed.fetched_once # Legacy. Dammit.
if sub.feed.favicon_fetching:
feeds[feed_id]['favicon_fetching'] = True
if sub.feed.has_feed_exception or sub.feed.has_page_exception:
feeds[feed_id]['has_exception'] = True
feeds[feed_id]['exception_type'] = 'feed' if sub.feed.has_feed_exception else 'page'
feeds[feed_id]['feed_address'] = sub.feed.feed_address
feeds[feed_id]['exception_code'] = sub.feed.exception_code
return feeds
@classmethod
def queue_new_feeds(cls, user, new_feeds=None):
if not isinstance(user, User):
user = User.objects.get(pk=user)
if not new_feeds:
new_feeds = cls.objects.filter(user=user,
feed__fetched_once=False,
active=True).values('feed_id')
new_feeds = list(set([f['feed_id'] for f in new_feeds]))
if not new_feeds:
return
logging.user(user, "~BB~FW~SBQueueing NewFeeds: ~FC(%s) %s" % (len(new_feeds), new_feeds))
size = 4
for t in (new_feeds[pos:pos + size] for pos in xrange(0, len(new_feeds), size)):
NewFeeds.apply_async(args=(t,), queue="new_feeds")
@classmethod
def refresh_stale_feeds(cls, user, exclude_new=False):
if not isinstance(user, User):
user = User.objects.get(pk=user)
stale_cutoff = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
# TODO: Refactor below using last_update from REDIS_FEED_UPDATE_POOL
stale_feeds = UserSubscription.objects.filter(user=user, active=True, feed__last_update__lte=stale_cutoff)
if exclude_new:
stale_feeds = stale_feeds.filter(feed__fetched_once=True)
all_feeds = UserSubscription.objects.filter(user=user, active=True)
logging.user(user, "~FG~BBRefreshing stale feeds: ~SB%s/%s" % (
stale_feeds.count(), all_feeds.count()))
for sub in stale_feeds:
sub.feed.fetched_once = False
sub.feed.save()
if stale_feeds:
stale_feeds = list(set([f.feed_id for f in stale_feeds]))
cls.queue_new_feeds(user, new_feeds=stale_feeds)
@classmethod
def identify_deleted_feed_users(cls, old_feed_id):
users = UserSubscriptionFolders.objects.filter(folders__contains=old_feed_id).only('user')
user_ids = [usf.user_id for usf in users]
f = open('utils/backups/users.txt', 'w')
f.write('\n'.join([str(u) for u in user_ids]))
return user_ids
@classmethod
def recreate_deleted_feed(cls, new_feed_id, old_feed_id=None, skip=0):
user_ids = sorted([int(u) for u in open('utils/backups/users.txt').read().split('\n') if u])
count = len(user_ids)
for i, user_id in enumerate(user_ids):
if i < skip: continue
if i % 1000 == 0:
print "\n\n ------------------------------------------------"
print "\n ---> %s/%s (%s%%)" % (i, count, round(float(i)/count))
print "\n ------------------------------------------------\n"
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
print " ***> %s has no account" % user_id
continue
us, created = UserSubscription.objects.get_or_create(user_id=user_id, feed_id=new_feed_id, defaults={
'needs_unread_recalc': True,
'active': True,
'is_trained': True
})
if not created:
print " ***> %s already subscribed" % user.username
try:
usf = UserSubscriptionFolders.objects.get(user_id=user_id)
usf.add_missing_feeds()
except UserSubscriptionFolders.DoesNotExist:
print " ***> %s has no USF" % user.username
# Move classifiers
if old_feed_id:
classifier_count = 0
for classifier_type in (MClassifierAuthor, MClassifierFeed, MClassifierTag, MClassifierTitle):
classifiers = classifier_type.objects.filter(user_id=user_id, feed_id=old_feed_id)
classifier_count += classifiers.count()
for classifier in classifiers:
classifier.feed_id = new_feed_id
try:
classifier.save()
except NotUniqueError:
continue
if classifier_count:
print " Moved %s classifiers for %s" % (classifier_count, user.username)
def trim_read_stories(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
read_stories_key = "RS:%s:%s" % (self.user_id, self.feed_id)
stale_story_hashes = r.sdiff(read_stories_key, "F:%s" % self.feed_id)
if not stale_story_hashes:
return
logging.user(self.user, "~FBTrimming ~FR%s~FB read stories (~SB%s~SN)..." % (len(stale_story_hashes), self.feed_id))
r.srem(read_stories_key, *stale_story_hashes)
r.srem("RS:%s" % self.feed_id, *stale_story_hashes)
@classmethod
    def trim_user_read_stories(cls, user_id):
user = User.objects.get(pk=user_id)
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
subs = UserSubscription.objects.filter(user_id=user_id).only('feed')
if not subs: return
key = "RS:%s" % user_id
feeds = [f.feed_id for f in subs]
old_rs = r.smembers(key)
old_count = len(old_rs)
if not old_count:
logging.user(user, "~FBTrimming all read stories, ~SBnone found~SN.")
return
# r.sunionstore("%s:backup" % key, key)
# r.expire("%s:backup" % key, 60*60*24)
r.sunionstore(key, *["%s:%s" % (key, f) for f in feeds])
new_rs = r.smembers(key)
missing_rs = []
missing_count = 0
feed_re = re.compile(r'(\d+):.*?')
for i, rs in enumerate(old_rs):
if i and i % 1000 == 0:
if missing_rs:
r.sadd(key, *missing_rs)
missing_count += len(missing_rs)
missing_rs = []
found = feed_re.search(rs)
if not found:
print " ---> Not found: %s" % rs
continue
rs_feed_id = found.groups()[0]
if int(rs_feed_id) not in feeds:
missing_rs.append(rs)
if missing_rs:
r.sadd(key, *missing_rs)
missing_count += len(missing_rs)
new_count = len(new_rs)
new_total = new_count + missing_count
logging.user(user, "~FBTrimming ~FR%s~FB/%s (~SB%s sub'ed ~SN+ ~SB%s unsub'ed~SN saved)" %
(old_count - new_total, old_count, new_count, missing_count))
def mark_feed_read(self, cutoff_date=None):
if (self.unread_count_negative == 0
and self.unread_count_neutral == 0
and self.unread_count_positive == 0
and not self.needs_unread_recalc):
return
recount = True
# Use the latest story to get last read time.
if cutoff_date:
cutoff_date = cutoff_date + datetime.timedelta(seconds=1)
else:
latest_story = MStory.objects(story_feed_id=self.feed.pk)\
.order_by('-story_date').only('story_date').limit(1)
if latest_story and len(latest_story) >= 1:
cutoff_date = (latest_story[0]['story_date']
+ datetime.timedelta(seconds=1))
else:
cutoff_date = datetime.datetime.utcnow()
recount = False
if cutoff_date > self.mark_read_date or cutoff_date > self.oldest_unread_story_date:
self.last_read_date = cutoff_date
self.mark_read_date = cutoff_date
self.oldest_unread_story_date = cutoff_date
else:
logging.user(self.user, "Not marking %s as read: %s > %s/%s" %
(self, cutoff_date, self.mark_read_date, self.oldest_unread_story_date))
if not recount:
self.unread_count_negative = 0
self.unread_count_positive = 0
self.unread_count_neutral = 0
self.unread_count_updated = datetime.datetime.utcnow()
self.needs_unread_recalc = False
else:
self.needs_unread_recalc = True
self.save()
return True
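    # Example (sketch): mark an entire subscription as read up to the newest story,
    # or only up to a specific date:
    #   sub.mark_feed_read()
    #   sub.mark_feed_read(cutoff_date=some_story.story_date)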
def mark_newer_stories_read(self, cutoff_date):
if (self.unread_count_negative == 0
and self.unread_count_neutral == 0
and self.unread_count_positive == 0
and not self.needs_unread_recalc):
return
cutoff_date = cutoff_date - datetime.timedelta(seconds=1)
story_hashes = self.get_stories(limit=500, order="newest", cutoff_date=cutoff_date,
read_filter="unread", hashes_only=True)
data = self.mark_story_ids_as_read(story_hashes, aggregated=True)
return data
def mark_story_ids_as_read(self, story_hashes, request=None, aggregated=False):
data = dict(code=0, payload=story_hashes)
if not request:
request = self.user
if not self.needs_unread_recalc:
self.needs_unread_recalc = True
self.save()
if len(story_hashes) > 1:
logging.user(request, "~FYRead %s stories in feed: %s" % (len(story_hashes), self.feed))
else:
logging.user(request, "~FYRead story in feed: %s" % (self.feed))
RUserStory.aggregate_mark_read(self.feed_id)
for story_hash in set(story_hashes):
RUserStory.mark_read(self.user_id, self.feed_id, story_hash, aggregated=aggregated)
return data
def invert_read_stories_after_unread_story(self, story, request=None):
data = dict(code=1)
if story.story_date > self.mark_read_date:
return data
        # The story falls inside the already-marked-read range, so pull mark_read_date
        # back before it and re-mark the newer stories as read individually.
newer_stories = MStory.objects(story_feed_id=story.story_feed_id,
story_date__gte=story.story_date,
story_date__lte=self.mark_read_date
).only('story_hash')
newer_stories = [s.story_hash for s in newer_stories]
self.mark_read_date = story.story_date - datetime.timedelta(minutes=1)
self.needs_unread_recalc = True
self.save()
# Mark stories as read only after the mark_read_date has been moved, otherwise
# these would be ignored.
data = self.mark_story_ids_as_read(newer_stories, request=request, aggregated=True)
return data
def calculate_feed_scores(self, silent=False, stories=None, force=False):
# now = datetime.datetime.strptime("2009-07-06 22:30:03", "%Y-%m-%d %H:%M:%S")
now = datetime.datetime.now()
oldest_unread_story_date = now
if self.user.profile.last_seen_on < self.user.profile.unread_cutoff and not force:
# if not silent:
# logging.info(' ---> [%s] SKIPPING Computing scores: %s (1 week+)' % (self.user, self.feed))
return self
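        # Snapshot the current counts and flags (o* = old values) so only the fields
        # that actually change get written back via save(update_fields=...) below.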
ong = self.unread_count_negative
ont = self.unread_count_neutral
ops = self.unread_count_positive
oousd = self.oldest_unread_story_date
ucu = self.unread_count_updated
onur = self.needs_unread_recalc
oit = self.is_trained
# if not self.feed.fetched_once:
# if not silent:
# logging.info(' ---> [%s] NOT Computing scores: %s' % (self.user, self.feed))
# self.needs_unread_recalc = False
# self.save()
# return
feed_scores = dict(negative=0, neutral=0, positive=0)
# Two weeks in age. If mark_read_date is older, mark old stories as read.
date_delta = self.user.profile.unread_cutoff
if date_delta < self.mark_read_date:
date_delta = self.mark_read_date
else:
self.mark_read_date = date_delta
if self.is_trained:
if not stories:
stories = cache.get('S:%s' % self.feed_id)
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
cutoff_date=self.user.profile.unread_cutoff)
if not stories:
stories_db = MStory.objects(story_hash__in=unread_story_hashes)
stories = Feed.format_stories(stories_db, self.feed_id)
unread_stories = []
for story in stories:
if story['story_date'] < date_delta:
continue
if story['story_hash'] in unread_story_hashes:
unread_stories.append(story)
if story['story_date'] < oldest_unread_story_date:
oldest_unread_story_date = story['story_date']
# if not silent:
# logging.info(' ---> [%s] Format stories: %s' % (self.user, datetime.datetime.now() - now))
classifier_feeds = list(MClassifierFeed.objects(user_id=self.user_id, feed_id=self.feed_id, social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=self.user_id, feed_id=self.feed_id))
classifier_titles = list(MClassifierTitle.objects(user_id=self.user_id, feed_id=self.feed_id))
classifier_tags = list(MClassifierTag.objects(user_id=self.user_id, feed_id=self.feed_id))
if (not len(classifier_feeds) and
not len(classifier_authors) and
not len(classifier_titles) and
not len(classifier_tags)):
self.is_trained = False
# if not silent:
# logging.info(' ---> [%s] Classifiers: %s (%s)' % (self.user, datetime.datetime.now() - now, classifier_feeds.count() + classifier_authors.count() + classifier_tags.count() + classifier_titles.count()))
scores = {
'feed': apply_classifier_feeds(classifier_feeds, self.feed),
}
for story in unread_stories:
scores.update({
'author' : apply_classifier_authors(classifier_authors, story),
'tags' : apply_classifier_tags(classifier_tags, story),
'title' : apply_classifier_titles(classifier_titles, story),
})
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
feed_scores['positive'] += 1
elif min_score < 0:
feed_scores['negative'] += 1
else:
if scores['feed'] > 0:
feed_scores['positive'] += 1
elif scores['feed'] < 0:
feed_scores['negative'] += 1
else:
feed_scores['neutral'] += 1
else:
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
include_timestamps=True,
cutoff_date=date_delta)
feed_scores['neutral'] = len(unread_story_hashes)
if feed_scores['neutral']:
oldest_unread_story_date = datetime.datetime.fromtimestamp(unread_story_hashes[-1][1])
if not silent or settings.DEBUG:
logging.user(self.user, '~FBUnread count (~SB%s~SN%s): ~SN(~FC%s~FB/~FC%s~FB/~FC%s~FB) ~SBto~SN (~FC%s~FB/~FC%s~FB/~FC%s~FB)' % (self.feed_id, '/~FMtrained~FB' if self.is_trained else '', ong, ont, ops, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
self.unread_count_positive = feed_scores['positive']
self.unread_count_neutral = feed_scores['neutral']
self.unread_count_negative = feed_scores['negative']
self.unread_count_updated = datetime.datetime.now()
self.oldest_unread_story_date = oldest_unread_story_date
self.needs_unread_recalc = False
update_fields = []
if self.unread_count_positive != ops: update_fields.append('unread_count_positive')
if self.unread_count_neutral != ont: update_fields.append('unread_count_neutral')
if self.unread_count_negative != ong: update_fields.append('unread_count_negative')
if self.unread_count_updated != ucu: update_fields.append('unread_count_updated')
if self.oldest_unread_story_date != oousd: update_fields.append('oldest_unread_story_date')
if self.needs_unread_recalc != onur: update_fields.append('needs_unread_recalc')
if self.is_trained != oit: update_fields.append('is_trained')
if len(update_fields):
self.save(update_fields=update_fields)
if (self.unread_count_positive == 0 and
self.unread_count_neutral == 0):
self.mark_feed_read()
if not silent:
logging.user(self.user, '~FC~SNComputing scores: %s (~SB%s~SN/~SB%s~SN/~SB%s~SN)' % (self.feed, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
self.trim_read_stories()
return self
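    # Example (sketch): force a recount of a single subscription's unread stories:
    #   sub = sub.calculate_feed_scores(silent=True, force=True)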
@staticmethod
def score_story(scores):
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
return 1
elif min_score < 0:
return -1
return scores['feed']
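    # Example (sketch): score_story({'feed': -1, 'author': 1, 'tags': 0, 'title': 0}) -> 1,
    # because a positive author/tag/title classifier outweighs the feed-level score.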
def switch_feed(self, new_feed, old_feed):
# Rewrite feed in subscription folders
try:
user_sub_folders = UserSubscriptionFolders.objects.get(user=self.user)
except Exception, e:
logging.info(" *** ---> UserSubscriptionFolders error: %s" % e)
return
logging.info(" ===> %s " % self.user)
# Switch read stories
RUserStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
new_feed_id=new_feed.pk)
def switch_feed_for_classifier(model):
duplicates = model.objects(feed_id=old_feed.pk, user_id=self.user_id)
if duplicates.count():
logging.info(" ---> Switching %s %s" % (duplicates.count(), model))
for duplicate in duplicates:
duplicate.feed_id = new_feed.pk
if duplicate.social_user_id is None:
duplicate.social_user_id = 0
try:
duplicate.save()
pass
except (IntegrityError, OperationError):
logging.info(" !!!!> %s already exists" % duplicate)
duplicate.delete()
switch_feed_for_classifier(MClassifierTitle)
switch_feed_for_classifier(MClassifierAuthor)
switch_feed_for_classifier(MClassifierFeed)
switch_feed_for_classifier(MClassifierTag)
# Switch to original feed for the user subscription
self.feed = new_feed
self.needs_unread_recalc = True
try:
UserSubscription.objects.get(user=self.user, feed=new_feed)
except UserSubscription.DoesNotExist:
self.save()
user_sub_folders.rewrite_feed(new_feed, old_feed)
else:
# except (IntegrityError, OperationError):
logging.info(" !!!!> %s already subscribed" % self.user)
self.delete()
return
@classmethod
def collect_orphan_feeds(cls, user):
us = cls.objects.filter(user=user)
try:
usf = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
return
us_feed_ids = set([sub.feed_id for sub in us])
folders = json.decode(usf.folders)
def collect_ids(folders, found_ids):
for item in folders:
# print ' --> %s' % item
if isinstance(item, int):
# print ' --> Adding feed: %s' % item
found_ids.add(item)
elif isinstance(item, dict):
# print ' --> Descending folder dict: %s' % item.values()
found_ids.update(collect_ids(item.values(), found_ids))
elif isinstance(item, list):
# print ' --> Descending folder list: %s' % len(item)
found_ids.update(collect_ids(item, found_ids))
# print ' --> Returning: %s' % found_ids
return found_ids
found_ids = collect_ids(folders, set())
diff = len(us_feed_ids) - len(found_ids)
if diff > 0:
logging.info(" ---> Collecting orphans on %s. %s feeds with %s orphans" % (user.username, len(us_feed_ids), diff))
orphan_ids = us_feed_ids - found_ids
folders.extend(list(orphan_ids))
usf.folders = json.encode(folders)
usf.save()
@classmethod
def verify_feeds_scheduled(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
user = User.objects.get(pk=user_id)
subs = cls.objects.filter(user=user)
feed_ids = [sub.feed.pk for sub in subs]
p = r.pipeline()
for feed_id in feed_ids:
p.zscore('scheduled_updates', feed_id)
p.zscore('error_feeds', feed_id)
results = p.execute()
p = r.pipeline()
for feed_id in feed_ids:
p.zscore('queued_feeds', feed_id)
try:
results_queued = p.execute()
except:
results_queued = map(lambda x: False, range(len(feed_ids)))
safety_net = []
for f, feed_id in enumerate(feed_ids):
scheduled_updates = results[f*2]
error_feeds = results[f*2+1]
            queued_feeds = results_queued[f]
if not scheduled_updates and not queued_feeds and not error_feeds:
safety_net.append(feed_id)
if not safety_net: return
logging.user(user, "~FBFound ~FR%s unscheduled feeds~FB, scheduling..." % len(safety_net))
for feed_id in safety_net:
feed = Feed.get_by_id(feed_id)
feed.set_next_scheduled_update()
@classmethod
def count_subscribers_to_other_subscriptions(cls, feed_id):
# feeds = defaultdict(int)
subscribing_users = cls.objects.filter(feed=feed_id).values('user', 'feed_opens').order_by('-feed_opens')[:25]
print "Got subscribing users"
subscribing_user_ids = [sub['user'] for sub in subscribing_users]
print "Got subscribing user ids"
cofeeds = cls.objects.filter(user__in=subscribing_user_ids).values('feed').annotate(
user_count=Count('user')).order_by('-user_count')[:200]
print "Got cofeeds: %s" % len(cofeeds)
# feed_subscribers = Feed.objects.filter(pk__in=[f['feed'] for f in cofeeds]).values('pk', 'num_subscribers')
# max_local_subscribers = float(max([f['user_count'] for f in cofeeds]))
# max_total_subscribers = float(max([f['num_subscribers'] for f in feed_subscribers]))
# feed_subscribers = dict([(s['pk'], float(s['num_subscribers'])) for s in feed_subscribers])
# pctfeeds = [(f['feed'],
# f['user_count'],
# feed_subscribers[f['feed']],
# f['user_count']/max_total_subscribers,
# f['user_count']/max_local_subscribers,
# max_local_subscribers,
# max_total_subscribers) for f in cofeeds]
# print pctfeeds[:5]
# orderedpctfeeds = sorted(pctfeeds, key=lambda f: .5*f[3]+.5*f[4], reverse=True)[:8]
# pprint([(Feed.get_by_id(o[0]), o[1], o[2], o[3], o[4]) for o in orderedpctfeeds])
users_by_feeds = {}
for feed in [f['feed'] for f in cofeeds]:
users_by_feeds[feed] = [u['user'] for u in cls.objects.filter(feed=feed, user__in=subscribing_user_ids).values('user')]
print "Got users_by_feeds"
table = tfidf()
for feed in users_by_feeds.keys():
table.addDocument(feed, users_by_feeds[feed])
print "Got table"
sorted_table = sorted(table.similarities(subscribing_user_ids), key=itemgetter(1), reverse=True)[:8]
pprint([(Feed.get_by_id(o[0]), o[1]) for o in sorted_table])
return table
# return cofeeds
class RUserStory:
@classmethod
def mark_story_hashes_read(cls, user_id, story_hashes, r=None, s=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if not s:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
feed_ids = set()
friend_ids = set()
if not isinstance(story_hashes, list):
story_hashes = [story_hashes]
single_story = len(story_hashes) == 1
for story_hash in story_hashes:
feed_id, _ = MStory.split_story_hash(story_hash)
feed_ids.add(feed_id)
if single_story:
cls.aggregate_mark_read(feed_id)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in s.sinter(share_key, friend_key)]
friend_ids.update(friends_with_shares)
cls.mark_read(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=p)
p.execute()
# p2.execute()
return list(feed_ids), list(friend_ids)
@classmethod
def mark_story_hash_unread(cls, user_id, story_hash, r=None, s=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if not s:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
friend_ids = set()
feed_id, _ = MStory.split_story_hash(story_hash)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in s.sinter(share_key, friend_key)]
friend_ids.update(friends_with_shares)
cls.mark_unread(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=r)
return feed_id, list(friend_ids)
@classmethod
def aggregate_mark_read(cls, feed_id):
if not feed_id:
logging.debug(" ***> ~BR~FWNo feed_id on aggregate mark read. Ignoring.")
return
r = redis.Redis(connection_pool=settings.REDIS_FEED_READ_POOL)
week_of_year = datetime.datetime.now().strftime('%Y-%U')
feed_read_key = "fR:%s:%s" % (feed_id, week_of_year)
r.incr(feed_read_key)
r.expire(feed_read_key, 2*settings.DAYS_OF_STORY_HASHES*24*60*60)
@classmethod
def mark_read(cls, user_id, story_feed_id, story_hash, social_user_ids=None,
aggregated=False, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
if not story_hash: return
def redis_commands(key):
r.sadd(key, story_hash)
# r2.sadd(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
read_story_key = 'RS:%s:%s' % (user_id, story_feed_id)
redis_commands(read_story_key)
if social_user_ids:
for social_user_id in social_user_ids:
social_read_story_key = 'RS:%s:B:%s' % (user_id, social_user_id)
redis_commands(social_read_story_key)
if not aggregated:
key = 'lRS:%s' % user_id
r.lpush(key, story_hash)
r.ltrim(key, 0, 1000)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
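    # Example (sketch): record a single story as read for a user:
    #   RUserStory.mark_read(user.pk, story.story_feed_id, story.story_hash)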
@staticmethod
def story_can_be_marked_read_by_user(story, user):
message = None
if story.story_date < user.profile.unread_cutoff:
if user.profile.is_premium:
message = "Story is more than %s days old, cannot mark as unread." % (
settings.DAYS_OF_UNREAD)
elif story.story_date > user.profile.unread_cutoff_premium:
message = "Story is more than %s days old. Premiums can mark unread up to 30 days." % (
settings.DAYS_OF_UNREAD_FREE)
else:
message = "Story is more than %s days old, cannot mark as unread." % (
settings.DAYS_OF_UNREAD_FREE)
return message
@staticmethod
def mark_unread(user_id, story_feed_id, story_hash, social_user_ids=None, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
if not story_hash: return
def redis_commands(key):
r.srem(key, story_hash)
# r2.srem(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
read_story_key = 'RS:%s:%s' % (user_id, story_feed_id)
redis_commands(read_story_key)
read_stories_list_key = 'lRS:%s' % user_id
r.lrem(read_stories_list_key, story_hash)
if social_user_ids:
for social_user_id in social_user_ids:
social_read_story_key = 'RS:%s:B:%s' % (user_id, social_user_id)
redis_commands(social_read_story_key)
@staticmethod
def get_stories(user_id, feed_id, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
story_hashes = r.smembers("RS:%s:%s" % (user_id, feed_id))
return story_hashes
@staticmethod
def get_read_stories(user_id, offset=0, limit=12, order="newest"):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
key = "lRS:%s" % user_id
if order == "oldest":
count = r.llen(key)
if offset >= count: return []
offset = max(0, count - (offset+limit))
story_hashes = r.lrange(key, offset, offset+limit)
elif order == "newest":
story_hashes = r.lrange(key, offset, offset+limit)
return story_hashes
@classmethod
def switch_feed(cls, user_id, old_feed_id, new_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
for story_hash in story_hashes:
_, hash_story = MStory.split_story_hash(story_hash)
new_story_hash = "%s:%s" % (new_feed_id, hash_story)
read_feed_key = "RS:%s:%s" % (user_id, new_feed_id)
p.sadd(read_feed_key, new_story_hash)
# p2.sadd(read_feed_key, new_story_hash)
p.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
read_user_key = "RS:%s" % (user_id)
p.sadd(read_user_key, new_story_hash)
# p2.sadd(read_user_key, new_story_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.execute()
# p2.execute()
if len(story_hashes) > 0:
logging.info(" ---> %s read stories" % len(story_hashes))
@classmethod
def switch_hash(cls, feed_id, old_hash, new_hash):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
usersubs = UserSubscription.objects.filter(feed_id=feed_id, last_read_date__gte=UNREAD_CUTOFF)
logging.info(" ---> ~SB%s usersubs~SN to switch read story hashes..." % len(usersubs))
for sub in usersubs:
rs_key = "RS:%s:%s" % (sub.user.pk, feed_id)
read = r.sismember(rs_key, old_hash)
if read:
p.sadd(rs_key, new_hash)
# p2.sadd(rs_key, new_hash)
p.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
read_user_key = "RS:%s" % sub.user.pk
p.sadd(read_user_key, new_hash)
# p2.sadd(read_user_key, new_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.execute()
# p2.execute()
@classmethod
def read_story_count(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
key = "RS:%s" % user_id
count = r.scard(key)
return count
class UserSubscriptionFolders(models.Model):
"""
    A JSON list of folders and feeds to which a user has subscribed. The list
    is a recursive descent of feeds and folders within folders. Used to lay out
    the feeds and folders in the Reader's feed navigation pane.
"""
user = models.ForeignKey(User, unique=True)
folders = models.TextField(default="[]")
def __unicode__(self):
return "[%s]: %s" % (self.user, len(self.folders),)
class Meta:
verbose_name_plural = "folders"
verbose_name = "folder"
def compact(self):
folders = json.decode(self.folders)
def _compact(folder):
new_folder = []
for item in folder:
if isinstance(item, int) and item not in new_folder:
new_folder.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
new_folder.append({f_k: _compact(f_v)})
return new_folder
new_folders = _compact(folders)
logging.info(" ---> Compacting from %s to %s" % (folders, new_folders))
new_folders = json.encode(new_folders)
logging.info(" ---> Compacting from %s to %s" % (len(self.folders), len(new_folders)))
self.folders = new_folders
self.save()
def add_folder(self, parent_folder, folder):
if self.folders:
user_sub_folders = json.decode(self.folders)
else:
user_sub_folders = []
obj = {folder: []}
user_sub_folders = add_object_to_folder(obj, parent_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
def arranged_folders(self):
user_sub_folders = json.decode(self.folders)
def _arrange_folder(folder):
folder_feeds = []
folder_folders = []
for item in folder:
if isinstance(item, int):
folder_feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
arranged_folder = _arrange_folder(f_v)
folder_folders.append({f_k: arranged_folder})
arranged_folder = folder_feeds + folder_folders
return arranged_folder
return _arrange_folder(user_sub_folders)
def flatten_folders(self, feeds=None, inactive_feeds=None):
folders = json.decode(self.folders)
flat_folders = {" ": []}
if feeds and not inactive_feeds:
inactive_feeds = []
def _flatten_folders(items, parent_folder="", depth=0):
for item in items:
if (isinstance(item, int) and
(not feeds or
(item in feeds or item in inactive_feeds))):
if not parent_folder:
parent_folder = ' '
if parent_folder in flat_folders:
flat_folders[parent_folder].append(item)
else:
flat_folders[parent_folder] = [item]
elif isinstance(item, dict):
for folder_name in item:
folder = item[folder_name]
flat_folder_name = "%s%s%s" % (
parent_folder if parent_folder and parent_folder != ' ' else "",
" - " if parent_folder and parent_folder != ' ' else "",
folder_name
)
flat_folders[flat_folder_name] = []
_flatten_folders(folder, flat_folder_name, depth+1)
_flatten_folders(folders)
return flat_folders
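    # Example (sketch): with folders [1, {"Tech": [2, {"Python": [3]}]}],
    # flatten_folders() returns {' ': [1], 'Tech': [2], 'Tech - Python': [3]}.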
def delete_feed(self, feed_id, in_folder, commit_delete=True):
feed_id = int(feed_id)
def _find_feed_in_folders(old_folders, folder_name='', multiples_found=False, deleted=False):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
if (folder == feed_id and in_folder is not None and (
(folder_name != in_folder) or
(folder_name == in_folder and deleted))):
multiples_found = True
logging.user(self.user, "~FB~SBDeleting feed, and a multiple has been found in '%s' / '%s' %s" % (folder_name, in_folder, '(deleted)' if deleted else ''))
if (folder == feed_id and
(folder_name == in_folder or in_folder is None) and
not deleted):
logging.user(self.user, "~FBDelete feed: %s'th item: %s folders/feeds" % (
k, len(old_folders)
))
deleted = True
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
nf, multiples_found, deleted = _find_feed_in_folders(f_v, f_k, multiples_found, deleted)
new_folders.append({f_k: nf})
return new_folders, multiples_found, deleted
user_sub_folders = self.arranged_folders()
user_sub_folders, multiples_found, deleted = _find_feed_in_folders(user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
        if not multiples_found and deleted and commit_delete:
            user_sub = None
            try:
                user_sub = UserSubscription.objects.get(user=self.user, feed=feed_id)
            except UserSubscription.DoesNotExist:
                duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
                if duplicate_feed:
                    try:
                        user_sub = UserSubscription.objects.get(user=self.user,
                                                                feed=duplicate_feed[0].feed)
                    except UserSubscription.DoesNotExist:
                        return
            if user_sub:
                user_sub.delete()
def delete_folder(self, folder_to_delete, in_folder, feed_ids_in_folder, commit_delete=True):
def _find_folder_in_folders(old_folders, folder_name, feeds_to_delete, deleted_folder=None):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
new_folders.append(folder)
if folder in feeds_to_delete:
feeds_to_delete.remove(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
if f_k == folder_to_delete and (folder_name == in_folder or in_folder is None):
logging.user(self.user, "~FBDeleting folder '~SB%s~SN' in '%s': %s" % (f_k, folder_name, folder))
deleted_folder = folder
else:
nf, feeds_to_delete, deleted_folder = _find_folder_in_folders(f_v, f_k, feeds_to_delete, deleted_folder)
new_folders.append({f_k: nf})
return new_folders, feeds_to_delete, deleted_folder
user_sub_folders = json.decode(self.folders)
user_sub_folders, feeds_to_delete, deleted_folder = _find_folder_in_folders(user_sub_folders, '', feed_ids_in_folder)
self.folders = json.encode(user_sub_folders)
self.save()
if commit_delete:
UserSubscription.objects.filter(user=self.user, feed__in=feeds_to_delete).delete()
return deleted_folder
def delete_feeds_by_folder(self, feeds_by_folder):
logging.user(self.user, "~FBDeleting ~FR~SB%s~SN feeds~FB: ~SB%s" % (
len(feeds_by_folder), feeds_by_folder))
for feed_id, in_folder in feeds_by_folder:
self.delete_feed(feed_id, in_folder)
return self
def rename_folder(self, folder_to_rename, new_folder_name, in_folder):
def _find_folder_in_folders(old_folders, folder_name):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
nf = _find_folder_in_folders(f_v, f_k)
if f_k == folder_to_rename and folder_name == in_folder:
logging.user(self.user, "~FBRenaming folder '~SB%s~SN' in '%s' to: ~SB%s" % (
f_k, folder_name, new_folder_name))
f_k = new_folder_name
new_folders.append({f_k: nf})
return new_folders
user_sub_folders = json.decode(self.folders)
user_sub_folders = _find_folder_in_folders(user_sub_folders, '')
self.folders = json.encode(user_sub_folders)
self.save()
def move_feed_to_folders(self, feed_id, in_folders=None, to_folders=None):
logging.user(self.user, "~FBMoving feed '~SB%s~SN' in '%s' to: ~SB%s" % (
feed_id, in_folders, to_folders))
user_sub_folders = json.decode(self.folders)
for in_folder in in_folders:
self.delete_feed(feed_id, in_folder, commit_delete=False)
user_sub_folders = json.decode(self.folders)
for to_folder in to_folders:
user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_feed_to_folder(self, feed_id, in_folder=None, to_folder=None):
logging.user(self.user, "~FBMoving feed '~SB%s~SN' in '%s' to: ~SB%s" % (
feed_id, in_folder, to_folder))
user_sub_folders = json.decode(self.folders)
self.delete_feed(feed_id, in_folder, commit_delete=False)
user_sub_folders = json.decode(self.folders)
user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_folder_to_folder(self, folder_name, in_folder=None, to_folder=None):
logging.user(self.user, "~FBMoving folder '~SB%s~SN' in '%s' to: ~SB%s" % (
folder_name, in_folder, to_folder))
user_sub_folders = json.decode(self.folders)
deleted_folder = self.delete_folder(folder_name, in_folder, [], commit_delete=False)
user_sub_folders = json.decode(self.folders)
user_sub_folders = add_object_to_folder(deleted_folder, to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_feeds_by_folder_to_folder(self, feeds_by_folder, to_folder):
logging.user(self.user, "~FBMoving ~SB%s~SN feeds to folder: ~SB%s" % (
len(feeds_by_folder), to_folder))
for feed_id, in_folder in feeds_by_folder:
feed_id = int(feed_id)
self.move_feed_to_folder(feed_id, in_folder, to_folder)
return self
def rewrite_feed(self, original_feed, duplicate_feed):
def rewrite_folders(folders, original_feed, duplicate_feed):
new_folders = []
for k, folder in enumerate(folders):
if isinstance(folder, int):
if folder == duplicate_feed.pk:
# logging.info(" ===> Rewrote %s'th item: %s" % (k+1, folders))
new_folders.append(original_feed.pk)
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
new_folders.append({f_k: rewrite_folders(f_v, original_feed, duplicate_feed)})
return new_folders
folders = json.decode(self.folders)
folders = rewrite_folders(folders, original_feed, duplicate_feed)
self.folders = json.encode(folders)
self.save()
def flat(self):
folders = json.decode(self.folders)
def _flat(folder, feeds=None):
if not feeds:
feeds = []
for item in folder:
if isinstance(item, int) and item not in feeds:
feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
feeds.extend(_flat(f_v))
return feeds
return _flat(folders)
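    # Example (sketch): with folders [1, {"Tech": [2, 3]}], flat() returns [1, 2, 3].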
def feed_ids_under_folder_slug(self, slug):
folders = json.decode(self.folders)
def _feeds(folder, found=False, folder_title=None):
feeds = []
local_found = False
for item in folder:
if isinstance(item, int) and item not in feeds and found:
feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
if slugify(f_k) == slug:
found = True
local_found = True
folder_title = f_k
found_feeds, folder_title = _feeds(f_v, found, folder_title)
feeds.extend(found_feeds)
if local_found:
found = False
local_found = False
return feeds, folder_title
return _feeds(folders)
@classmethod
def add_all_missing_feeds(cls):
usf = cls.objects.all().order_by('pk')
total = usf.count()
for i, f in enumerate(usf):
print "%s/%s: %s" % (i, total, f)
f.add_missing_feeds()
def add_missing_feeds(self):
all_feeds = self.flat()
subs = [us.feed_id for us in
UserSubscription.objects.filter(user=self.user).only('feed')]
missing_subs = set(all_feeds) - set(subs)
if missing_subs:
logging.debug(" ---> %s is missing %s subs. Adding %s..." % (
self.user, len(missing_subs), missing_subs))
for feed_id in missing_subs:
feed = Feed.get_by_id(feed_id)
if feed:
us, _ = UserSubscription.objects.get_or_create(user=self.user, feed=feed, defaults={
'needs_unread_recalc': True
})
if not us.needs_unread_recalc:
us.needs_unread_recalc = True
us.save()
missing_folder_feeds = set(subs) - set(all_feeds)
if missing_folder_feeds:
user_sub_folders = json.decode(self.folders)
logging.debug(" ---> %s is missing %s folder feeds. Adding %s..." % (
self.user, len(missing_folder_feeds), missing_folder_feeds))
for feed_id in missing_folder_feeds:
feed = Feed.get_by_id(feed_id)
if feed and feed.pk == feed_id:
user_sub_folders = add_object_to_folder(feed_id, "", user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
def auto_activate(self):
if self.user.profile.is_premium: return
active_count = UserSubscription.objects.filter(user=self.user, active=True).count()
if active_count: return
all_feeds = self.flat()
if not all_feeds: return
for feed in all_feeds[:64]:
try:
sub = UserSubscription.objects.get(user=self.user, feed=feed)
except UserSubscription.DoesNotExist:
continue
sub.active = True
sub.save()
if sub.feed.active_subscribers <= 0:
sub.feed.count_subscribers()
class Feature(models.Model):
"""
Simple blog-like feature board shown to all users on the home page.
"""
description = models.TextField(default="")
date = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "[%s] %s" % (self.date, self.description[:50])
class Meta:
ordering = ["-date"]
|
mit
| -6,377,939,261,452,766,000
| 43.556107
| 289
| 0.542445
| false
| 3.851913
| false
| false
| false
|
okdshin/Yender
|
tutorial/tutorial.py
|
1
|
3955
|
import os, random, time
import yender
import numpy as np
import collections
block_set = collections.OrderedDict()
block_set["."] = yender.Block(char=".", name="air", visible=False)
block_set["#"] = yender.Block(char="#", name="stone", color=(127, 127, 127), movable=False)
block_set["R"] = yender.Block(char="R", name="red_tile", block_type="tile", color=(255, 0, 0))
block_set["B"] = yender.Block(char="B", name="blue_tile", block_type="tile", color=(0, 0, 255))
block_set["Y"] = yender.Block(char="Y", name="yellow_tile", block_type="tile", color=(255, 255, 0))
block_set["G"] = yender.Block(char="G", name="green_tile", block_type="tile", color=(0, 255, 0))
block_id_dict = {}
for i, block in enumerate(block_set.values()):
block_id_dict[block.name] = i
def make_i_maze_map():
map_source = [
"#######",
"#2...3#",
"###.###",
"###.###",
"###.###",
"#..0.1#",
"#######",
]
map_, place_holders = yender.load_map(block_set, map_source)
# place goal tiles and an indicator tile
start_pos = place_holders["0"]
indicator_pos = place_holders["1"]
blue_pos = place_holders["2"]
red_pos = place_holders["3"]
indicator_color = random.choice(("G", "Y"))
map_.set_block(start_pos, block_set["."])
map_.set_block(indicator_pos, block_set[indicator_color])
map_.set_block(blue_pos, block_set["B"])
map_.set_block(red_pos, block_set["R"])
return map_, indicator_color, start_pos, blue_pos, red_pos
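# Task summary: the indicator tile placed at "1" (green or yellow) tells the agent which
# goal pays off: yellow -> the red tile is rewarded, green -> the blue tile is rewarded.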
class I_MazeEnv:
max_step = 50
def __init__(self):
self.rogue_env = yender.RogueEnv()
def get_ob(self):
block_ob = yender.map_to_block_ob(self.map_,
direction=self.rogue_env.agent_direction,
pos=self.rogue_env.agent_position,
block_id_dict=block_id_dict,
default_block=block_set["#"])
ob = yender.block_ob_to_hot_vectors(block_ob, len(block_id_dict))
return ob
def reset(self):
self.t = 0
self.total_reward = 0.0
self.map_, self.indicator, start_pos, self.blue_pos, self.red_pos = make_i_maze_map()
start_direction = random.choice(list(self.rogue_env.DIRECTION_SET.values()))
self.rogue_env.reset(self.map_, start_direction, start_pos)
ob = self.get_ob()
return ob
def step(self, action):
self.rogue_env.step(action)
# reward and done check
if self.rogue_env.map_.get_block(self.rogue_env.agent_position).name == "red_tile":
done = True
reward = 1.0 if self.indicator == "Y" else -1.0
elif self.rogue_env.map_.get_block(self.rogue_env.agent_position).name == "blue_tile":
done = True
reward = 1.0 if self.indicator == "G" else -1.0
elif self.t == self.max_step:
done = True
reward = -0.04
else:
done = False
reward = -0.04
# get observation
ob = self.get_ob()
self.t += 1
self.total_reward += reward
return ob, reward, done, self.rogue_env
def render(self):
self.rogue_env.print_map()
print("total_reward {0:0.2f}".format(self.total_reward))
max_episode = 20
max_step = 50
def render(env, episode, t, ob, sleep_time, message=""):
os.system("clear")
print("episode", episode)
print("step", t)
env.render()
print("ob", ob)
time.sleep(sleep_time)
print(message)
def main():
env = I_MazeEnv()
for episode in range(max_episode):
ob = env.reset()
for t in range(max_step):
render(env, episode, t, ob, 0.1)
action = random.choice(range(4)) # random agent
ob, reward, done, info = env.step(action)
if done:
render(env, episode, t, ob, 10, "Episode finished after {} timesteps".format(t+1))
break
if __name__ == "__main__":
main()
|
mit
| 2,953,751,811,628,154,000
| 31.418033
| 99
| 0.565613
| false
| 3.17926
| false
| false
| false
|
kata198/AdvancedHTMLParser
|
AdvancedHTMLParser/xpath/_body.py
|
1
|
84083
|
'''
Copyright (c) 2019 Timothy Savannah under terms of LGPLv3. All Rights Reserved.
See LICENSE (https://gnu.org/licenses/lgpl-3.0.txt) for more information.
See: https://github.com/kata198/AdvancedHTMLParser for full information
==INTERNAL==
xpath._body.py - Internal module for dealing with items within the "body" of a filter expression on a tag
'''
# vim: set ts=4 sw=4 st=4 expandtab :
import copy
import re
from ..Tags import TagCollection
from ..compat import STRING_TYPES
from ..utils import tostr
from .exceptions import XPathNotImplementedError, XPathRuntimeError, XPathParseError
from ._filters import _mk_xpath_op_filter_tag_is_nth_child_index
from .null import Null
# __all__ is currently set to what "parsing" imports
__all__ = ('parseBodyStringIntoBodyElements', 'BodyElement', 'BodyElementOperation', 'BodyElementValue', 'BodyElementValueGenerator', 'BodyLevel_Top')
class BodyElement(object):
'''
BodyElement - Base class of body elements.
Every distinct "unit" within a body, be it a static value or a function call, or otherwise,
are subclassed from this type.
'''
@classmethod
def createFromMatch(cls, curBodyStr, matchObj):
'''
createFromMatch - Create this BodyElement from a given match object, and return the element and remainder for parsing
@param curBodyStr <str> - The current body string (matchObj should have matched at the head of this)
@param matchObj <re.match> - The match object
@return tuple( createdElement<BodyElement>, remainingBodyStr<str> ) - A tuple of the created element and the remaining portion to parse
'''
groupDict = matchObj.groupdict()
thisElement = cls( **groupDict )
curBodyStr = curBodyStr[ matchObj.span()[1] : ]
return ( thisElement, curBodyStr )
# XXX: This is a container for BodyElements, but itself can be treated as a BodyElement.
# Should give same parent class, or keep separate?
class BodyLevel(BodyElement):
'''
BodyLevel - A single "level" of a body
'''
VALIDATE_ONLY_BOOLEAN_OR_STR = False
def __init__(self):
'''
__init__ - Create this object
'''
self.bodyElements = []
def __repr__(self):
'''
__repr__ - Get a string representation of this object as codeish
@return <str> - String repr
'''
return "%s( bodyElements = %s )" %( self.__class__.__name__, repr(self.bodyElements))
# TODO: Give these a better name, as they could contain BodyElement or BodyLevels
def appendBodyElement(self, bodyElement):
'''
appendBodyElement - Add a body element to the current tail of this level
@param bodyElement <BodyElement> - The body element to add
'''
self.bodyElements.append(bodyElement)
def appendBodyElements(self, bodyElements):
'''
            appendBodyElements - Add a list of body elements to the current tail of this level
@param bodyElements list<BodyElement> - A list of BodyElements to add
'''
self.bodyElements += bodyElements
def __len__(self):
'''
__len__ - Get number of elements in this group
@return <int> - Number of BodyElements in this group (just this level)
'''
return len(self.bodyElements)
def getBodyElements(self):
'''
getBodyElements - Get the body elements associated with this level
@return list<BodyElement> - List of BodyElements associated with this level
'''
return self.bodyElements
def __iter__(self):
'''
__iter__ - Iterate over this object
'''
for bodyElement in self.bodyElements:
yield bodyElement
def evaluateLevelForTag(self, currentTag):
'''
evaluateLevelForTag - Shorthand version of "evaluateLevelForTags" but for one tag
@param currentTag <AdvancedTag> - A single tag
@return <BodyElementValue> - Resulting value for running this level against given tag
@see evaluateLevelForTags
'''
# TODO: Clean up this function
return self.evaluateLevelForTags( [currentTag] )[0]
def evaluateLevelForTags(self, currentTags):
'''
            evaluateLevelForTags - Evaluate this level and return the final value for each tag.
@param currentTags list/TagCollection < AdvancedTag > - The current set of tags to process
@return list< BodyElementValue > - The BodyElementValue of the results, in a list 1:1 same order same size as #currentTags
'''
# thisLevelElements - local reference to our elements
thisLevelElements = self.bodyElements
# resultPerTag - This list contains the values to be returned for each tag, in same order as #currentTags
resultPerTag = []
if len(thisLevelElements) == 0:
# This is an empty [], so just return the same
return resultPerTag
# TODO: Optimize this function, further
## These next two arrays provide the common and ordered interface to iterate through all various types which
# need evaluation.
# They are tuples, ( Class, Lambda to Evaluate ). All lambdas within the same set follow same signature
# ORDERED_BE_TYPES_TO_PROCESS_TAGS - The ordered types to process which generate values from the tag itself
ORDERED_BE_TYPES_TO_PROCESS_TAGS = [
(BodyLevel, lambda _bl, _curTag : _bl.evaluateLevelForTag(_curTag) ),
(BodyElementValueGenerator, lambda _bevg, _curTag : _bevg.resolveValueFromTag(_curTag) ),
]
# ORDERED_BE_TYPES_TO_PROCESS_VALUES - The ordered types to process which generate values from left side and right side
ORDERED_BE_TYPES_TO_PROCESS_VALUES = [
(BodyElementOperation, lambda _beo, _leftSide, _rightSide : _beo.performOperation(_leftSide, _rightSide) ),
(BodyElementComparison, lambda _bec, _leftSide, _rightSide : _bec.doComparison(_leftSide, _rightSide) ),
(BodyElementBooleanOps, lambda _bebo, _leftSide, _rightSide : _bebo.doBooleanOp(_leftSide, _rightSide) ),
]
# Iterate over all tags
for thisTag in currentTags:
# curElements - The current set of elements for this tag, as we unroll, this will change.
# Initial value will be reference to the original set of elements
curElements = thisLevelElements
# Run through the tag-processing (value generators, sublevels) ones first
for typeToProcess, processFunction in ORDERED_BE_TYPES_TO_PROCESS_TAGS:
curElements = [ (issubclass( curElement.__class__, typeToProcess ) and processFunction( curElement, thisTag )) or curElement for curElement in curElements ]
# # nextElements - We will assemble into this list the next iteration of #curElements
# nextElements = []
#
# for curElement in curElements:
#
# curElementClass = curElement.__class__
#
# if not issubclass(curElementClass, typeToProcess):
# # Not processing this type, just put back on the list
# nextElements.append( curElement )
#
# else:
# # Processing type, get new value
# generatedValue = processFunction( curElement, thisTag )
# nextElements.append( generatedValue )
#
# # Update #curElements
# curElements = nextElements
# Great, now we have to start keeping track of left/right and process the rest
for typeToProcess, processFunction in ORDERED_BE_TYPES_TO_PROCESS_VALUES:
# nextElements - We will assemble into this list the next iteration of #curElements
nextElements = []
# leftSide - this will be the left side value
leftSide = None
numElements = len(curElements)
i = 0
while i < numElements:
curElement = curElements[i]
curElementClass = curElement.__class__
if not issubclass(curElementClass, typeToProcess ):
# We aren't processing this type, just add it back
nextElements.append( curElement )
# Update previous value and increment counter
leftSide = curElement
i += 1
# Loop back
continue
else:
# Validate that we are not at the end (need to gather a right)
if (i + 1) >= numElements:
# TODO: Better error message?
raise XPathParseError('XPath expression ends in an operation, no right-side to operation.')
# Validate left is right type
if not issubclass(leftSide.__class__, BodyElementValue):
# TODO: Better error message?
raise XPathParseError('XPath expression contains two consecutive operations (left side)')
# Grab and validate right is right type
rightSide = curElements[i + 1]
if not issubclass(rightSide.__class__, BodyElementValue):
# TODO: Better error message?
raise XPathParseError('XPath expression contains two consecutive operations (right side)')
# Resolve a new value feeding left, right into the function
resolvedValue = processFunction( curElement, leftSide, rightSide)
# TODO: Remove this check?
if not issubclass(resolvedValue.__class__, BodyElementValue):
# Not a value? Error for now, may add back looping later if necessary for some ops
raise XPathRuntimeError('XPath expression for op "%s" did not return a BodyElementValue, as expected. Got: <%s> %s' % ( \
repr(curElement),
resolvedValue.__class__.__name__,
repr(resolvedValue),
)
)
# Pop the last value (left side), drop the operation, load the resolved value in place.
nextElements = nextElements[ : -1 ] + [resolvedValue]
# Update new left to this generated value
leftSide = resolvedValue
# Move past right side
i += 2
# Update #curElements
curElements = nextElements
# END: for typeToProcess, processFunction in ORDERED_BE_TYPES_TO_PROCESS_VALUES:
# At this point, should be only one value left. Zero was already handled at start
numElementsRemaining = len(curElements)
if numElementsRemaining != 1:
raise XPathRuntimeError('Got unexpected current number of elements at the end. Expected 1, got %d. Repr: %s' % ( \
numElementsRemaining,
repr(curElements),
)
)
finalElement = curElements[0]
finalElementClass = finalElement.__class__
# TODO: Remove this check?
try:
finalElementValueType = finalElement.VALUE_TYPE
except AttributeError:
# Missing this class attribute implicitly also checks the type,
# as no other types provide such a name.
# TODO: Do a better repr, maybe with string of the xpath?
raise XPathRuntimeError('Final Value resolved from level """%s""" was not a BodyElementValue, as was expected.\nIt is a: %s \nrepr: %s' % ( \
repr(self),
finalElementClass.__name__,
repr(finalElement),
)
)
if self.VALIDATE_ONLY_BOOLEAN_OR_STR and finalElementValueType not in (BODY_VALUE_TYPE_BOOLEAN, BODY_VALUE_TYPE_NUMBER):
raise XPathRuntimeError('Final value resolved from level """%s""" was not an integer or a boolean, cannot proceed.\nVALUE_TYPE is %s.\nClass: %s\nRepr: %s' % ( \
repr(self),
_bodyValueTypeToDebugStr(finalElementValueType),
finalElementClass.__name__,
repr(finalElement),
)
)
# Validated and processed this tag on this level, append to the result array
resultPerTag.append(finalElement)
# END for thisTag in currentTags
return resultPerTag
# TODO: Need to refactor this a bit maybe, to support levels as designed
class BodyLevel_Top(BodyLevel):
'''
BodyLevel_Top - The topmost level of a body. This is the final evaluation before passing onto the next tag filter
'''
VALIDATE_ONLY_BOOLEAN_OR_STR = True
def filterTagsByBody(self, currentTags):
'''
            filterTagsByBody - Evaluate the topmost level (and all sub levels), and return the tags that match.
For the topmost level, we run all components left-to-right, and evaluate the result.
If an integer remains, we use that 1-origin Nth child of parent.
If a boolean remains, we use True to retain, False to discard.
@param currentTags TagCollection/list<AdvancedTag> - Current set of tags to validate
@return TagCollection - The tags which passed validation
'''
retTags = []
if not currentTags:
return retTags
# Process this level and all subs, get the final value per tag for processing
# validation to retain or discard
finalResultPerTag = self.evaluateLevelForTags(currentTags)
numTags = len(currentTags)
for i in range(numTags):
currentTag = currentTags[i]
finalValue = finalResultPerTag[i]
#finalValueClass = finalValue.__class__
# TODO: We should be able to optimize this loop as all results will have either
# a number, or a boolean
if finalValue.VALUE_TYPE == BODY_VALUE_TYPE_BOOLEAN:
shouldRetainTag = finalValue.getValue()
if shouldRetainTag is True:
retTags.append( currentTag )
#elif finalValue.VALUE_TYPE == BODY_VALUE_TYPE_NUMBER:
else:
# This should have already been validated
theValue = finalValue.getValue()
innerNum = int( theValue )
if float(innerNum) != theValue:
# Float value, not integer, return nothing.
continue
# TODO: Better.
testFunc = _mk_xpath_op_filter_tag_is_nth_child_index(currentTag.tagName, innerNum)
retTags += testFunc( currentTag )
#else:
# raise XPathRuntimeError('Error, unexpected value type %s on value: %s' %( _bodyValueTypeToDebugStr(finalValue.VALUE_TYPE), repr(finalValue) ) )
return TagCollection(retTags)
# applyFunction - follow this interface, for now.
applyFunction = filterTagsByBody
#############################
## Values ##
#############################
## Values are calculated (returned from a BodyElementValueGenerator or otherwise),
# or static (provided explicitly in body string).
# These are given separate bases, and are all subclasses of BodyElement.
# Values are associated with a type (cls.VALUE_TYPE), defined as one of the types below.
# Values are wrapped within the associated BodyElementValue subclasses rather than as native python types
##### #####
### BodyElementValue types ###
##### #####
# NOTE: Use enum type? Requires additional package under python2
# An enumeration of the possible types a BodyElementValue subclass may hold
BODY_VALUE_TYPE_UNKNOWN = 0
BODY_VALUE_TYPE_NUMBER = 1
# Leave a gap for 2 should we split float/int
BODY_VALUE_TYPE_STRING = 3
BODY_VALUE_TYPE_BOOLEAN = 4
# List - Unimplemented
BODY_VALUE_TYPE_LIST = 5
BODY_VALUE_TYPE_NULL = 6
# BODY_VALUE_TYPE_TO_STR - The value type integer to a string representation.
BODY_VALUE_TYPE_TO_STR = {
BODY_VALUE_TYPE_UNKNOWN : "unknown",
BODY_VALUE_TYPE_NUMBER : "number",
BODY_VALUE_TYPE_STRING : "string",
BODY_VALUE_TYPE_BOOLEAN : "boolean",
BODY_VALUE_TYPE_LIST : "list",
BODY_VALUE_TYPE_NULL : "null",
}
def _bodyValueTypeToDebugStr(bodyValue):
return "<%d>%s" %(bodyValue, BODY_VALUE_TYPE_TO_STR[bodyValue])
class BodyElementValue(BodyElement):
'''
BodyElementValue - Base class of BodyElements which represent a static or resolved value.
These wrap the native python representation of the values.
        A class-level variable, VALUE_TYPE, defines the type associated with the value.
'''
# VALUE_TYPE - The type of this value. Should be set by subclass
VALUE_TYPE = BODY_VALUE_TYPE_UNKNOWN
def __init__(self, value):
'''
__init__ - Create this element as a wrapper around an already-calculated value
@param value <...> - The python-native value to be held by this element.
This will be passed into self.setValue for processing/validation
'''
self.value = None
self.setValue(value)
def getValue(self):
'''
            getValue - Get the value associated with this object
@return <...> - The python-native value wrapped by this object
'''
return self.value
def setValue(self, newValue):
'''
setValue - Sets the value associated with this object
This will be called on all value sets, including __init__ (and from regex)
@param newValue <???> - The new value for this object
'''
self.value = newValue
def __repr__(self):
'''
__repr__ - Get a string representation of this value, with code information
'''
className = self.__class__.__name__
valueType = self.VALUE_TYPE
valueTypeStr = BODY_VALUE_TYPE_TO_STR[ valueType ]
valueRepr = repr( self.getValue() )
return "%s<VALUE_TYPE=%d[%s]>(value=%s)" %( className, valueType, valueTypeStr, valueRepr )
class BodyElementValue_Boolean(BodyElementValue):
'''
BodyElementValue_Boolean - A True/False BodyElementValue, like returned by a comparison operation
'''
VALUE_TYPE = BODY_VALUE_TYPE_BOOLEAN
def setValue(self, newValue):
'''
setValue - Set a boolean value
@param newValue <bool> - Boolean value
@see BodyElementValue.setValue
'''
if not isinstance(newValue, bool):
raise XPathRuntimeError('BodyElementValue_Boolean tried to setValue as a non-boolean type. Was: %s . Repr: %s' %( newValue.__class__.__name__, repr(newValue) ))
self.value = newValue
class BodyElementValue_String(BodyElementValue):
'''
BodyElementValue_String - A string BodyElementValue
'''
VALUE_TYPE = BODY_VALUE_TYPE_STRING
def setValue(self, newValue):
'''
setValue - Set a string value
@param newValue <str> - String value
@see BodyElementValue.setValue
'''
# TODO: Check type of newValue against str (or str/unicode for py2) ?
self.value = tostr(newValue)
class BodyElementValue_Null(BodyElementValue):
'''
BodyElementValue_Null - A null BodyElementValue
'''
VALUE_TYPE = BODY_VALUE_TYPE_NULL
def __init__(self, value=Null):
'''
__init__ - Create this object. Override default to allow passing no value (there is only one)
'''
BodyElementValue.__init__(self, value)
def setValue(self, newValue=Null):
'''
setValue - Set a null value
                @param newValue <Null/None> - The Null singleton (None is also accepted and converted to Null)
@see BodyElementValue.setValue
'''
# TODO: Do we want this? None == Null?
if newValue is None:
newValue = Null
if newValue != Null:
raise XPathRuntimeError('BodyElementValue_Null tried to set a value but was not Null. Was: %s . Repr: %s' %( newValue.__class__.__name__, repr(newValue)))
self.value = newValue
class BodyElementValue_Number(BodyElementValue):
'''
BodyElementValue_Number - A numeric BodyElementValue
'''
VALUE_TYPE = BODY_VALUE_TYPE_NUMBER
def setValue(self, newValue):
'''
setValue - Sets the inner value to a float, or raises exception on failure to convert.
@param newValue <str/float> - A number (positive or negative, integer or float)
                @raises XPathRuntimeError - Type passed is not convertible to float
                @see BodyElementValue.setValue
'''
try:
self.value = float(newValue)
except Exception as fe:
            raise XPathRuntimeError('Runtime Type Error: BodyElementValue_Number was passed a value, <%s> %s -- but could not convert to float. %s %s' %( \
type(newValue).__name__,
repr(newValue),
fe.__class__.__name__,
str(fe),
)
)
class BodyElementValue_List(BodyElementValue):
'''
BodyElementValue_List - A BodyElementValue which is a list of other values.
All elements within this list will be other BodyElementValues, rather than raw values.
'''
VALUE_TYPE = BODY_VALUE_TYPE_LIST
def __init__(self, initialValues=None):
'''
__init__ - Create this object
@param initialValues <None/list> Initial values to load into the internal list.
'''
if not initialValues:
initialValues = []
BodyElementValue.__init__(self, initialValues)
def setValue(self, newValues):
'''
setValue - Replace the previous lists with new list
@param newValues list<...> - A new list from which to create the internal list.
All items must have a related BodyElementValue type, or already be one.
'''
updatedList = [ ( issubclass(thisVal.__class__, BodyElementValue) and thisVal ) or _pythonValueToBodyElementValue(thisVal) for thisVal in newValues ]
self.value = updatedList
# PYTHON_TYPE_NAME_TO_BODY_VALUE_CLASS - The __name__ of the type(val), to the associated BEV container
PYTHON_TYPE_NAME_TO_BODY_VALUE_CLASS = {
'int' : BodyElementValue_Number,
'float' : BodyElementValue_Number,
'str' : BodyElementValue_String,
'unicode' : BodyElementValue_String,
'bool' : BodyElementValue_Boolean,
'NoneType' : BodyElementValue_Null,
'list' : BodyElementValue_List,
'tuple' : BodyElementValue_List,
'set' : BodyElementValue_List,
}
def _pythonValueToBodyElementValue(pythonValue):
'''
_pythonValueToBodyElementValue - Convert a native/raw python value to
its respective BodyElementValue subclassed container.
@param pythonValue <???> - The python "raw" value (such as an int or a string)
@return <BodyElementValue subclass> - A created container body element value wrapping provided value
'''
pythonValueTypeName = type(pythonValue).__name__
try:
bodyElementValueClass = PYTHON_TYPE_NAME_TO_BODY_VALUE_CLASS[ pythonValueTypeName ]
except KeyError:
# XXX: Exception or just use an "unknown" base BodyElementValue?
# Maybe better to just shut it down early rather than introduce questionable things on down the line
raise XPathRuntimeError('Failed to find a matching BodyElementValue type from python type "%s" ! Repr: %s' %( pythonValueTypeName, repr(pythonValue) ) )
return bodyElementValueClass( pythonValue )
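# Illustrative sketch (added for clarity, not part of the original module):
#  shows how raw python values get wrapped via _pythonValueToBodyElementValue.
#  The helper below is hypothetical and is never called by the parser itself.
def _exampleWrapPythonValues():
    '''
        _exampleWrapPythonValues - Demonstrate wrapping raw python values into BodyElementValue containers
          @return tuple< float, str, bool > - The wrapped values, unwrapped again via getValue()
    '''
    numberValue = _pythonValueToBodyElementValue(5)        # -> BodyElementValue_Number holding 5.0
    stringValue = _pythonValueToBodyElementValue('hello')  # -> BodyElementValue_String holding 'hello'
    booleanValue = _pythonValueToBodyElementValue(True)    # -> BodyElementValue_Boolean holding True
    return ( numberValue.getValue(), stringValue.getValue(), booleanValue.getValue() )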
#############################
## Static Values ##
#############################
# STATIC_VALUES_RES - A list of tuples, which will be iterated upon parsing a body to create the BodyElementValue_StaticValue types
# Tuples are in format: ( re.compile'd expression, BodyElementValue_StaticValue child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
STATIC_VALUES_RES = []
class BodyElementValue_StaticValue(BodyElementValue):
'''
BodyElementValue_StaticValue - Base class of static values ( appear in the body string directly, e.x. "hello" or 12 )
'''
pass
class BodyElementValue_StaticValue_String(BodyElementValue_StaticValue):
'''
BodyElementValue_StaticValue_String - A StaticValue which represents a string
'''
VALUE_TYPE = BODY_VALUE_TYPE_STRING
## String will have two expressions to generate -- one for single quotes, one for double quotes. Both extract the inner string
# Can combine into one, but this is more clear.
# Double quoted string
#BEV_SV_STRING_DOUBLE_QUOTE_RE = re.compile(r'''^([ \t]*[\"](?P<value>[^"]*)[\"][ \t]*)''')
BEV_SV_STRING_DOUBLE_QUOTE_RE = re.compile(r'''^([ \t]*[\"](?P<value>([\\]["]|[^"])*)[\"][ \t]*)''')
STATIC_VALUES_RES.append( (BEV_SV_STRING_DOUBLE_QUOTE_RE, BodyElementValue_StaticValue_String) )
# Single quoted string
#BEV_SV_STRING_SINGLE_QUOTE_RE = re.compile(r"""^([ \t]*[\'](?P<value>[^']*)[\'][ \t]*)""")
BEV_SV_STRING_SINGLE_QUOTE_RE = re.compile(r"""^([ \t]*[\'](?P<value>([\\][']|[^'])*)[\'][ \t]*)""")
STATIC_VALUES_RES.append( (BEV_SV_STRING_SINGLE_QUOTE_RE, BodyElementValue_StaticValue_String) )
class BodyElementValue_StaticValue_Number(BodyElementValue_StaticValue):
'''
BodyElementValue_StaticValue_Number - StaticValue to represent a number
'''
VALUE_TYPE = BODY_VALUE_TYPE_NUMBER
def setValue(self, newValue):
'''
setValue - Sets the inner value to a float, or raises exception on failure to convert.
@param newValue <str/float> - A number (positive or negative, integer or float)
@raises XPathRuntimeError - Type passed is not convertable to float
@see BodyElementValue_StaticValue.setValue
'''
try:
self.value = float(newValue)
except Exception as fe:
raise XPathRuntimeError('Runtime Type Error: BodyElementValue_StaticValue_Number was passed a value, <%s> %s -- but could not convert to float. %s %s' %( \
type(newValue).__name__,
repr(newValue),
fe.__class__.__name__,
str(fe),
)
)
# NOTE: Look into spaces after negative sign
BEV_SV_NUMBER_RE = re.compile(r'''^([ \t]*(?P<value>([-]){0,1}([\d]*[\.][\d]+)|([\d]+))[ \t]*)''')
STATIC_VALUES_RES.append( (BEV_SV_NUMBER_RE, BodyElementValue_StaticValue_Number) )
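# Illustrative sketch (added for clarity, not part of the original module):
#  how a static numeric token at the head of a body string is matched and wrapped.
#  The sample body string is hypothetical.
def _exampleMatchStaticNumber():
    '''
        _exampleMatchStaticNumber - Demonstrate matching a static number token and wrapping it
          @return <BodyElementValue_StaticValue_Number> - Holds the float -12.5
    '''
    matchObj = BEV_SV_NUMBER_RE.match('  -12.5 = @width')
    numberToken = matchObj.groupdict()['value']   # '-12.5' ; '= @width' is left for the parse loop
    return BodyElementValue_StaticValue_Number( numberToken )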
#############################
## Value Generators ##
#############################
# VALUE_GENERATOR_RES - A list of tuples, which will be iterated upon parsing a body to create the ValueGenerator types
# Tuples are in format: ( re.compile'd expression, BodyElementValueGenerator child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
VALUE_GENERATOR_RES = []
class BodyElementValueGenerator(BodyElement):
'''
BodyElementValueGenerator - Base class of BodyElements which resolve to a BodyValue after execution with context of a tag
'''
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Process "thisTag" to obtain a BodyElementValue relative to this tag and the extending class's implementation
@param thisTag <Tags.AdvancedTag> - The tag of relevance
@return <BodyElementValue> - The resulting value
'''
raise NotImplementedError('BodyElementValueGenerator.resolveValueFromTag is not implemented in type %s! Must use a class extending BodyElementValueGenerator' % ( \
self.__class__.__name__,
)
)
class BodyElementValueGenerator_FetchAttribute(BodyElementValueGenerator):
def __init__(self, attributeName):
'''
__init__ - Create this Value Generator to fetch the value of an attribute
on a tag.
@param attributeName <str> - The name of the attribute to fetch
'''
BodyElementValueGenerator.__init__(self)
self.attributeName = attributeName
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Fetch the value of a given attribute from a tag, and return the value.
@param thisTag <Tags.AdvancedTag> - An instance of a tag on which to work
@return <BodyElementValue> - The value of the attribute, or Null, wrapped in a BodyElementValue container
'''
attributeName = self.attributeName
if attributeName == '*' or '*' in attributeName:
raise XPathNotImplementedError('Wildcard attributes are not yet supported!')
# TODO: Can just use getAttribute with a default?
if not thisTag.hasAttribute( attributeName ):
# No attribute present, return Null
return BodyElementValue_Null()
val = '%s' %( thisTag.getAttribute(attributeName), )
return BodyElementValue_String(val)
def __repr__(self):
'''
__repr__ - Get string representation of this object
'''
return """%s( attributeName = "%s" )""" %( self.__class__.__name__, self.attributeName)
BEVG_FETCH_ATTRIBUTE_RE = re.compile(r'^[ \t]*[@](?P<attributeName>([*]|[a-zA-Z_][a-zA-Z0-9_\-]*))[ \t]*')
VALUE_GENERATOR_RES.append( (BEVG_FETCH_ATTRIBUTE_RE, BodyElementValueGenerator_FetchAttribute) )
class BodyElementValueGenerator_Text(BodyElementValueGenerator):
'''
BodyElementValueGenerator_Text - Implement the 'text()' function
'''
def __init__(self, functionInner=None):
BodyElementValueGenerator.__init__(self)
def resolveValueFromTag(self, thisTag):
return BodyElementValue_String( thisTag.innerText )
BEVG_TEXT_RE = re.compile(r'^([ \t]*[tT][eE][xX][tT][ \t]*[\(][ \t]*[\)][ \t]*)')
VALUE_GENERATOR_RES.append( (BEVG_TEXT_RE, BodyElementValueGenerator_Text) )
class BodyElementValueGenerator_Last(BodyElementValueGenerator):
'''
        BodyElementValueGenerator_Last - Implement the 'last()' function
'''
def __init__(self, functionInner=None):
BodyElementValueGenerator.__init__(self)
def resolveValueFromTag(self, thisTag):
parentElement = thisTag.parentElement
if parentElement is None:
# No parent, last() must be 1
            return BodyElementValue_Number(1)
thisTagName = thisTag.tagName
childrenOfRelevance = [ childEm for childEm in parentElement.children if childEm.tagName == thisTagName ]
return BodyElementValue_Number( len( childrenOfRelevance ) )
BEVG_LAST_RE = re.compile(r'''^([ \t]*[lL][aA][sS][tT][ \t]*[\(][ \t]*[\)][ \t]*)''')
VALUE_GENERATOR_RES.append( (BEVG_LAST_RE, BodyElementValueGenerator_Last) )
class BodyElementValueGenerator_Position(BodyElementValueGenerator):
'''
BodyElementValueGenerator_Position - Implement the 'position()' function
'''
def __init__(self, functionInner=None):
BodyElementValueGenerator.__init__(self)
def resolveValueFromTag(self, thisTag):
parentElement = thisTag.parentElement
if parentElement is None:
# No parent, position() must be 1
            return BodyElementValue_Number(1)
thisTagName = thisTag.tagName
childrenOfRelevance = [ childEm for childEm in parentElement.children if childEm.tagName == thisTagName ]
return BodyElementValue_Number( childrenOfRelevance.index( thisTag ) + 1 )
BEVG_POSITION_RE = re.compile(r'^([ \t]*[pP][oO][sS][iI][tT][iI][oO][nN][ \t]*[\(][ \t]*[\)][ \t]*)')
VALUE_GENERATOR_RES.append( (BEVG_POSITION_RE, BodyElementValueGenerator_Position) )
##############################
# ValueGenerator Functions #
##############################
# TODO: Create a separate list for REs that associate with functions, rather than sharing with single-level BodyElementValueGenerators?
class BodyElementValueGenerator_Function(BodyElementValueGenerator):
'''
BodyElementValueGenerator_Function - Base class for BodyElementValueGenerator's which are functions (and can take nested levels)
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 0
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'unknown'
@classmethod
def createFromMatch(cls, curBodyStr, matchObj):
'''
createFromMatch - Create this BodyElement from a given match object, and return the element and remainder for parsing
@param curBodyStr <str> - The current body string (matchObj should have matched at the head of this)
@param matchObj <re.match> - The match object
@return tuple( createdElement<BodyElement>, remainingBodyStr<str> ) - A tuple of the created element and the remaining portion to parse
'''
groupDict = matchObj.groupdict()
restOfBody = groupDict['restOfBody']
( fnArgElements, remainingStr ) = _parseFunctionArgsToBodyElements(restOfBody)
if len(fnArgElements) < cls.FUNCTION_MIN_ARGS:
raise XPathParseError('"%s" function takes at least %d arguments, but found only %d.\nError at: %s' % ( \
cls.FUNCTION_NAME_STR,
cls.FUNCTION_MIN_ARGS,
len(fnArgElements),
repr(curBodyStr),
)
)
thisElement = cls( fnArgElements )
return ( thisElement, remainingStr )
def __init__(self, fnArgElements=None):
'''
__init__ - Create this object
'''
if fnArgElements is None:
# TODO: Error?
fnArgElements = []
if len(fnArgElements) < self.FUNCTION_MIN_ARGS:
# TODO: More context? Should be raised in #createFromMatch but do here as well for completeness...
raise XPathParseError('"%s" function takes at least %d arguments, but found only %d.' %( self.FUNCTION_NAME_STR, self.FUNCTION_MIN_ARGS, len(fnArgElements) ) )
self.fnArgElements = fnArgElements
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Return the BodyElementValue produced by executing this function in the context of a given tag
@param thisTag <AdvancedTag> - The tag of interest
@return <BodyElementValue> - The calculated value derived by executing this function
'''
raise NotImplementedError('BodyElement type "%s" (function "%s" ) must implement "BodyElementValueGenerator_Function.resolveValueFromTag" but does not!' % ( \
self.__class__.__name__,
self.FUNCTION_NAME_STR,
)
)
def __repr__(self):
'''
__repr__ - String repr of this class
'''
return """BodyElementValueGenerator_Function<functionName = "%s"> ( fnArgElements = %s )""" %(self.FUNCTION_NAME_STR, repr(self.fnArgElements) )
class BodyElementValueGenerator_Function_Concat(BodyElementValueGenerator_Function):
'''
BodyElementValueGenerator_Function_Concat - BodyElementValueGenerator class implementing concat function
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 2
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'concat'
@classmethod
def createFromMatch(cls, curBodyStr, matchObj):
'''
createFromMatch - Create this BodyElement from a given match object, and return the element and remainder for parsing
@param curBodyStr <str> - The current body string (matchObj should have matched at the head of this)
@param matchObj <re.match> - The match object
@return tuple( createdElement<BodyElement>, remainingBodyStr<str> ) - A tuple of the created element and the remaining portion to parse
'''
        # NOTE: The first part is copied for now due to inheritance
#
# We are looking to see if we can optimize this function call to a static value, if resolveable at run time
# Generate the base levels for all the args
groupDict = matchObj.groupdict()
restOfBody = groupDict['restOfBody']
( fnArgElements, remainingStr ) = _parseFunctionArgsToBodyElements(restOfBody)
if len(fnArgElements) < cls.FUNCTION_MIN_ARGS:
raise XPathParseError('"%s" function takes at least %d arguments, but found only %d.\nError at: %s' % ( \
cls.FUNCTION_NAME_STR,
cls.FUNCTION_MIN_ARGS,
len(fnArgElements),
repr(curBodyStr),
)
)
thisElement = cls( fnArgElements )
# Check if we can optimize this whole thing to a static value
staticValueParts = []
isStillStatic = True
for fnArgElement in thisElement.fnArgElements:
fnArgElementClass = fnArgElement.__class__
if issubclass(fnArgElementClass, BodyElementValue):
# Already a value, throw it on the heap
thisPartValue = fnArgElement.getValue()
# TODO: Handle Null -> '' ?
staticValueParts.append(thisPartValue)
continue
elif issubclass(fnArgElementClass, BodyLevel):
# A level, iterate over it.
                # Don't bother with recursion; if it is more than one level deep we won't optimize
for sublevelBodyElement in fnArgElement:
if issubclass(sublevelBodyElement.__class__, BodyElementValue):
sublevelPartValue = sublevelBodyElement.getValue()
staticValueParts.append(sublevelPartValue)
continue
# Not a value already, abort optimization attempt
isStillStatic = False
break
else:
# Not a value already, abort optimization attempt
isStillStatic = False
break
if isStillStatic is False:
# Leave the loop if not static
break
if isStillStatic is True:
# Huzzah! We have unrolled everything and retained a static value!
newElementValue = BodyElementValue_String( ''.join( staticValueParts ) )
#print ( "\nOptimized!\nFrom: %s\nTo: %s\n" %( repr(thisElement), repr(newElementValue) ) )
return (newElementValue, remainingStr)
#else:
#print ( "\nFAILED TO OPTIMIZE!\nFrom: %s\n" %( repr(thisElement), ))
# Failed to optimize, return the concat instance with levels
return ( thisElement, remainingStr )
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Return the concatenated string
@param thisTag <AdvancedTag> - The tag of interest
@return <BodyElementValue_String> - The concatenated string as a body element value
@see BodyElementValueGenerator_Function.resolveValueFromTag
'''
valParts = []
for fnArgElement in self.fnArgElements:
valPartElement = fnArgElement.evaluateLevelForTag(thisTag)
valPartElementValue = valPartElement.getValue()
if valPartElementValue == Null:
# If we got a null, treat it as an empty string for concatenation purposes
valPartElementValue = ''
valParts.append(valPartElementValue)
val = ''.join(valParts)
return BodyElementValue_String(val)
#BEVG_CONCAT_FUNCTION_RE = re.compile(r'''^([ \t]*[cC][oO][nN][cC][aA][tT][ \t]*[\(][ \t]*(?P<fnArgsStr>[^\)]+)[ \t]*[\)][ \t]*)''')
BEVG_FUNCTION_CONCAT_RE = re.compile(r'''^([ \t]*[cC][oO][nN][cC][aA][tT][ \t]*[\(][ \t]*(?P<restOfBody>.+))$''')
VALUE_GENERATOR_RES.append( (BEVG_FUNCTION_CONCAT_RE, BodyElementValueGenerator_Function_Concat) )
class BodyElementValueGenerator_Function_Contains(BodyElementValueGenerator_Function):
'''
BodyElementValueGenerator_Function_Contains - BodyElementValueGenerator class implementing contains function
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 2
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'contains'
def __init__(self, fnArgElements=None):
'''
__init__ - Create this object
'''
BodyElementValueGenerator_Function.__init__(self, fnArgElements)
# Ensure we are given exactly two arguments
fnArgElements = self.fnArgElements
if len(fnArgElements) != 2:
raise XPathParseError('"contains" function takes exactly two arguments, but got %d. Args were: %s' % ( \
len(fnArgElements),
repr(fnArgElements),
)
)
self.string1Arg = fnArgElements[0]
self.string2Arg = fnArgElements[1]
def resolveValueFromTag(self, thisTag):
'''
resolveValueFromTag - Test if one string occurs within the other, and return the boolean result
@param thisTag <AdvancedTag> - The tag of interest
@return <BodyElementValue_Boolean> - True if string1 contains string2, otherwise False
@see BodyElementValueGenerator_Function.resolveValueFromTag
'''
string1ValueElement = self.string1Arg.evaluateLevelForTag(thisTag)
string2ValueElement = self.string2Arg.evaluateLevelForTag(thisTag)
try:
string1Value = str( string1ValueElement.getValue() )
except Exception as e1:
raise XPathRuntimeError('Error in contains() - cannot convert first argument to a string! It is %s' %( repr(string1ValueElement.getValue()), ))
try:
string2Value = str( string2ValueElement.getValue() )
except Exception as e2:
raise XPathRuntimeError('Error in contains() - cannot convert second argument to a string! It is %s' %( repr(string2ValueElement.getValue()), ))
containsResult = bool( string2Value in string1Value )
return BodyElementValue_Boolean(containsResult)
BEVG_FUNCTION_CONTAINS_RE = re.compile(r'''^([ \t]*[cC][oO][nN][tT][aA][iI][nN][sS][ \t]*[\(][ \t]*(?P<restOfBody>.+))$''')
VALUE_GENERATOR_RES.append( (BEVG_FUNCTION_CONTAINS_RE, BodyElementValueGenerator_Function_Contains) )
class BodyElementValueGenerator_Function_NormalizeSpace(BodyElementValueGenerator_Function):
'''
        BodyElementValueGenerator_Function_NormalizeSpace - Implement the 'normalize-space()' function
'''
# FUNCTION_MIN_ARGS - Class attribute for the minimum number of args lest there be a parsing error
FUNCTION_MIN_ARGS = 0
# FUNCTION_NAME_STR - Name of the function
FUNCTION_NAME_STR = 'normalize-space'
def __init__(self, fnArgElements=None):
'''
__init__ - Create this object
'''
BodyElementValueGenerator_Function.__init__(self, fnArgElements)
        # Ensure we are given at most one argument
fnArgElements = self.fnArgElements
numArguments = len(fnArgElements)
if numArguments > 1:
raise XPathParseError('normalize-space function called with too many arguments (0 or 1 supported)')
if numArguments == 1:
self.getString = lambda _thisTag : self._getStringFromArgumentAndTag(0, _thisTag)
else:
self.getString = lambda _thisTag : _thisTag.innerText
def _getStringFromArgumentAndTag(self, argumentNum, thisTag):
'''
_getStringFromArgument - Get the string for the given argument and tag
@param argumentNum <int> - The argument index
@param thisTag <AdvancedTag> - The tag of reference
@return <str> - The string held by that value
'''
valueEm = self.fnArgElements[0].evaluateLevelForTag(thisTag)
if not issubclass(valueEm.__class__, (BodyElementValue_String, BodyElementValue_Null) ):
            raise XPathRuntimeError('Got a value returned from within argument to normalize-space which was not a string! It was: %s' %( valueEm.VALUE_TYPE, ))
value = str(valueEm.getValue())
return value
def resolveValueFromTag(self, thisTag):
'''
            resolveValueFromTag - Return the whitespace-stripped string for the argument (or this tag's inner text when no argument was given)
            @param thisTag <AdvancedTag> - The tag of interest
            @return <BodyElementValue_String> - The stripped string, wrapped in a BodyElementValue
@see BodyElementValueGenerator_Function.resolveValueFromTag
'''
stringValue = self.getString(thisTag)
return BodyElementValue_String(stringValue.strip())
BEVG_FUNCTION_NORMALIZE_SPACE_RE = re.compile(r'''^([ \t]*[nN][oO][rR][mM][aA][lL][iI][zZ][eE][\-][sS][pP][aA][cC][eE][ \t]*[\(][ \t]*(?P<restOfBody>.+))$''')
VALUE_GENERATOR_RES.append( (BEVG_FUNCTION_NORMALIZE_SPACE_RE, BodyElementValueGenerator_Function_NormalizeSpace) )
#############################
## Operations ##
#############################
# OPERATION_RES - A list of tuples, which will be iterated upon parsing a body to create the Operation types
# Tuples are in format: ( re.compile'd expression, BodyElementOperation child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
OPERATION_RES = []
class BodyElementOperation(BodyElement):
'''
BodyElementOperation - Base class of BodyElements which perform some operation against the other body elements
'''
def performOperation(self, leftSide, rightSide):
raise NotImplementedError('BodyElementOperation.performOperation is not implemented in type %s! Must use a class extending BodyElementOperation' % ( \
self.__class__.__name__,
)
)
pass
class BodyElementOperation_Concat(BodyElementOperation):
'''
BodyElementOperation_Concat - Operation to handle the concat operator, "||"
'''
def performOperation(self, leftSide, rightSide):
'''
performOperation - Concatenate two strings
@param leftSide <str/BodyElementValue_String> - The left side string (will be the prefix)
@param rightSide <str/BodyElementValue_String> - The right side string (will be the suffix)
@return <BodyElementValue_String> - The concatenated string of leftSide + rightSide
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
if not issubclass(leftSideValue.__class__, STRING_TYPES):
raise XPathRuntimeError('Concat operator tried to concatenate, but left side is not a string type! It is a %s . repr: %s' % ( \
type(leftSideValue).__name__,
repr(leftSideValue),
)
)
if not issubclass(rightSideValue.__class__, STRING_TYPES):
raise XPathRuntimeError('Concat operator tried to concatenate, but right side is not a string type! It is a %s . repr: %s' % ( \
type(rightSideValue).__name__,
repr(rightSideValue),
)
)
#print ( "Left: %s\nRight: %s\n" %(repr(leftSideValue), repr(rightSideValue)) )
val = leftSideValue + rightSideValue
return BodyElementValue_String(val)
BEO_CONCAT_RE = re.compile(r'''^([ \t]*[\|][\|][ \t]*)''')
OPERATION_RES.append( (BEO_CONCAT_RE, BodyElementOperation_Concat) )
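# Illustrative sketch (added for clarity, not part of the original module):
#  the "||" operation object joining two already-resolved string values.
def _exampleConcatOperation():
    '''
        _exampleConcatOperation - Demonstrate BodyElementOperation_Concat on two string values
          @return <str> - 'Hello, world'
    '''
    concatOperation = BodyElementOperation_Concat()
    joinedValue = concatOperation.performOperation( BodyElementValue_String('Hello, '), BodyElementValue_String('world') )
    return joinedValue.getValue()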
class BodyElementOperation_Math(BodyElementOperation):
'''
BodyElementOperation_Math - Base class for math operators
'''
# MATH_OPERATOR_STR - Override with the math operator (e.x. "+")
MATH_OPERATOR_STR = 'unknown'
def _prepareValuesForOperation(self, leftSide, rightSide):
'''
_prepareValuesForOperation - Prepare values for a numeric operation
@param leftSide <str/BodyElementValue/int/float> - The left side of the operation
@param rightSide <str/BodyElementValue/int/float> - The right side of the operation
@return tuple( leftSideValue<float>, rightSideValue<float> )
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
try:
return ( float(leftSideValue), float(rightSideValue) )
except:
raise XPathRuntimeError('Math operation "%s" attempted, but could not convert body sides to numbers!\nLeft side: <%s> %s\nRight side: <%s> %s' % ( \
self.MATH_OPERATOR_STR,
type(leftSideValue).__name__,
repr(leftSideValue),
type(rightSideValue).__name__,
repr(rightSideValue),
)
)
def performOperation(self, leftSide, rightSide):
'''
performOperation - Perform a math operation (see type for details)
@param leftSide <...> - The left side (must be convertable to float)
@param rightSide <...> - The right side (must be convertable to float)
@return <BodyElementValue_Number> - The calculated value
'''
(leftSideValue, rightSideValue) = self._prepareValuesForOperation(leftSide, rightSide)
return self.doCalculation(leftSideValue, rightSideValue)
def doCalculation(self, leftSideValue, rightSideValue):
'''
            doCalculation - Perform the math operation implemented by this subclass.
Subclass must override this method.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
raise NotImplementedError('BodyElementOperation_Math class "%s" must implement doCalculation function!' %( self.__class__.__name__, ))
class BodyElementOperation_Math_Plus(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Plus - BodyElementOperation that implements the Math operation "plus" / "addition" / "+"
'''
MATH_OPERATOR_STR = '+'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Add two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue + rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_PLUS_RE = re.compile(r'''^([ \t]*[+][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_PLUS_RE, BodyElementOperation_Math_Plus) )
class BodyElementOperation_Math_Minus(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Minus - BodyElementOperation that implements the Math operation "minus" / "subtraction" / "-"
'''
MATH_OPERATOR_STR = '-'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Subtract two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue - rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_MINUS_RE = re.compile(r'''^([ \t]*[-][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_MINUS_RE, BodyElementOperation_Math_Minus) )
class BodyElementOperation_Math_Multiply(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Multiply - BodyElementOperation that implements the Math operation "multiply" / "multiplication" / "*"
'''
MATH_OPERATOR_STR = '*'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Multiply two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue * rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_MULTIPLY_RE = re.compile(r'''^([ \t]*[\*][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_MULTIPLY_RE, BodyElementOperation_Math_Multiply) )
class BodyElementOperation_Math_Divide(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Divide - BodyElementOperation that implements the Math operation "divide" / "division" / "div"
'''
MATH_OPERATOR_STR = 'div'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Divide two values, return the result.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue / rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_DIVIDE_RE = re.compile(r'''^([ \t]*[dD][iI][vV][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_DIVIDE_RE, BodyElementOperation_Math_Divide) )
class BodyElementOperation_Math_Modulus(BodyElementOperation_Math):
'''
BodyElementOperation_Math_Modulus - BodyElementOperation that implements the Math operation "modulus" / "%" / "mod"
'''
MATH_OPERATOR_STR = 'mod'
def doCalculation(self, leftSideValue, rightSideValue):
'''
doCalculation - Divide two values, return the remainder.
@param leftSideValue <float> - Left side value
@param rightSideValue <float> - Right side value
@return <BodyElementValue_Number> - The result of the operation
'''
result = leftSideValue % rightSideValue
return BodyElementValue_Number(result)
BEO_MATH_MODULUS_RE = re.compile(r'''^([ \t]*[mM][oO][dD][ \t]*)''')
OPERATION_RES.append( (BEO_MATH_MODULUS_RE, BodyElementOperation_Math_Modulus) )
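# Illustrative sketch (added for clarity, not part of the original module):
#  chaining two of the math operation objects above over numeric values.
def _exampleMathOperations():
    '''
        _exampleMathOperations - Demonstrate the math BodyElementOperation types
          @return tuple< float, float > - ( 7.0, 3.5 )
    '''
    plusOperation = BodyElementOperation_Math_Plus()
    sumValue = plusOperation.performOperation( BodyElementValue_Number(3), BodyElementValue_Number(4) )   # 3 + 4 -> 7.0
    divideOperation = BodyElementOperation_Math_Divide()
    quotientValue = divideOperation.performOperation( sumValue, BodyElementValue_Number(2) )              # 7 div 2 -> 3.5
    return ( sumValue.getValue(), quotientValue.getValue() )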
#############################
## Comparisons ##
#############################
# COMPARISON_RES - A list of tuples, which will be iterated upon parsing a body to create the Comparison types
# Tuples are in format: ( re.compile'd expression, BodyElementComparison child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
COMPARISON_RES = []
class BodyElementComparison(BodyElement):
'''
BodyElementComparison - Base class of Comparison operations (such as equals, not equals, greater than, etc.)
'''
    # NUMERIC_ONLY - If True, the value must be representable as a float (Number), or error.
# If False, other values (e.x. string) are supported.
NUMERIC_ONLY = False
# COMPARISON_OPERATOR_STR - This should be set to the operator associated with the comparison (e.x. "!=" or "<")
COMPARISON_OPERATOR_STR = 'UNKNOWN'
def doComparison(self, leftSide, rightSide):
'''
doComparison - Do the comparison associated with the subclass of BodyElementComparison
and return the result.
            @param leftSide <BodyElementValue/str/float/other?> - Left side of comparison operator
            @param rightSide <BodyElementValue/str/float/other?> - Right side of comparison operator
            @return <BodyElementValue_Boolean> - The result of the comparison operation
'''
(leftSideValue, rightSideValue) = BodyElementComparison._resolveTypesForComparison(leftSide, rightSide)
return self._doComparison(leftSideValue, rightSideValue)
def _doComparison(self, leftSideValue, rightSideValue):
'''
_doComparison - TYPE INTERNAL. Do the comparison associated with the subclass of BodyElementComparison
and return the result.
This should be implemented by each comparison type, rather than doComparison directly (which prepares arguments)
@param leftSideValue <str/float/other?> - Left side of comparison operator's value (unrolled from its BodyElementValue wrapper)
@param rightSideValue <str/float/other?> - Right side of comparison operator's value (unrolled from its BodyElementValue wrapper)
@return <BodyElementValue_Boolean> - The result of the comparison operation
'''
raise NotImplementedError('BodyElementComparison._doComparison must be implemented by extending subclass, but %s does not implement!' % ( \
self.__class__.__name__,
)
)
@classmethod
def _resolveTypesForComparison(cls, leftSide, rightSide):
'''
_resolveTypesForComparison - Resolve the given leftSide and rightSide dynamic types for comparison
@param leftSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the left side of the operator
@param rightSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the right side of the operator
@return tuple(left, right) of either <float, float> if castable, or the original raw pythonic types instead (pulled out of BodyElementValue if provided in one)
@notes - If cls.NUMERIC_ONLY is True, will throw an exception if cannot cast both sides to float. See raises section, below.
@raises XPathRuntimeError - If NUMERIC_ONLY is True, and cannot cast both sides to a float.
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
# Try to represent both sides as floats (Number), if possible
try:
return ( float(leftSideValue), float(rightSideValue) )
except:
# If we failed to convert both sides to number (e.x. strings), then check if this is a NUMERIC_ONLY type,
# in which case we will throw an error.
# Otherwise, return the raw python types
if cls.NUMERIC_ONLY is False:
return ( leftSideValue, rightSideValue )
else:
# TODO: Say explicitly which side won't convert?
raise XPathRuntimeError('XPath Runtime Error: Numeric-only comparison attempted with non-numeric values! Comparison "%s" only supports both sides being numeric, and cannot convert. Left side is <%s> ( %s ) and Right side is <%s> ( %s )' % ( \
cls.COMPARISON_OPERATOR_STR,
type(leftSideValue).__name__, repr(leftSideValue),
type(rightSideValue).__name__, repr(rightSideValue),
)
)
class BodyElementComparison_Equal(BodyElementComparison):
'''
BodyElementComparison_Equal - A BodyElementComparison which represents the "equals" operation, "="
'''
COMPARISON_OPERATOR_STR = "="
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue == rightSideValue )
BEC_EQUAL_RE = re.compile(r'^([ \t]*[=][ \t]*)')
COMPARISON_RES.append( (BEC_EQUAL_RE, BodyElementComparison_Equal) )
class BodyElementComparison_NotEqual(BodyElementComparison):
'''
BodyElementComparison_NotEqual - A BodyElementComparison which represents the "not equals" operation, "!="
'''
COMPARISON_OPERATOR_STR = "!="
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue != rightSideValue )
BEC_NOT_EQUAL_RE = re.compile(r'^([ \t]*[!][=][ \t]*)')
COMPARISON_RES.append( (BEC_NOT_EQUAL_RE, BodyElementComparison_NotEqual) )
# Other comparison types (greater than, less than or equal, etc.) follow below
class BodyElementComparison_LessThan(BodyElementComparison):
'''
BodyElementComparison_LessThan - A BodyElementComparison which represents the "less than" operation, "<"
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '<'
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue < rightSideValue )
BEC_LESS_THAN_RE = re.compile(r'^([ \t]*[<][ \t]*)')
COMPARISON_RES.append( (BEC_LESS_THAN_RE, BodyElementComparison_LessThan) )
class BodyElementComparison_LessThanOrEqual(BodyElementComparison):
'''
BodyElementComparison_LessThanOrEqual - A BodyElementComparison which represents the "less than or equal" operation, "<="
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '<='
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue <= rightSideValue )
BEC_LESS_THAN_OR_EQUAL_RE = re.compile(r'^([ \t]*[<][=][ \t]*)')
COMPARISON_RES.append( (BEC_LESS_THAN_OR_EQUAL_RE, BodyElementComparison_LessThanOrEqual) )
class BodyElementComparison_GreaterThan(BodyElementComparison):
'''
BodyElementComparison_GreaterThan - A BodyElementComparison which represents the "greater than" operation, ">"
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '>'
def _doComparison(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue > rightSideValue )
BEC_GREATER_THAN_RE = re.compile(r'^([ \t]*[>][ \t]*)')
COMPARISON_RES.append( (BEC_GREATER_THAN_RE, BodyElementComparison_GreaterThan) )
class BodyElementComparison_GreaterThanOrEqual(BodyElementComparison):
'''
BodyElementComparison_GreaterThanOrEqual - A BodyElementComparison which represents the "greater than or equal" operation, ">="
This is a "NUMERIC_ONLY" comparison operation.
'''
NUMERIC_ONLY = True
COMPARISON_OPERATOR_STR = '>='
def _doComparison(self, leftSideValue, rightSideValue):
        return BodyElementValue_Boolean( leftSideValue >= rightSideValue )
BEC_GREATER_THAN_OR_EQUAL_RE = re.compile(r'^([ \t]*[>][=][ \t]*)')
COMPARISON_RES.append( (BEC_GREATER_THAN_OR_EQUAL_RE, BodyElementComparison_GreaterThanOrEqual) )
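# Illustrative sketch (added for clarity, not part of the original module):
#  comparisons coerce both sides to float where possible, so a numeric string
#  compares equal to a number.
def _exampleComparisons():
    '''
        _exampleComparisons - Demonstrate the comparison BodyElement types
          @return tuple< bool, bool > - ( True, True )
    '''
    equalComparison = BodyElementComparison_Equal()
    isEqual = equalComparison.doComparison( BodyElementValue_String('5'), BodyElementValue_Number(5) )    # '5' == 5.0 after coercion
    lessThanComparison = BodyElementComparison_LessThan()
    isLess = lessThanComparison.doComparison( BodyElementValue_Number(3), BodyElementValue_Number(10) )   # 3.0 < 10.0
    return ( isEqual.getValue(), isLess.getValue() )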
#############################
## Boolean Ops ##
#############################
# BOOLEAN_OPS_RES - A list of tuples, which will be iterated upon parsing a body to create the BooleanOps types
# Tuples are in format: ( re.compile'd expression, BodyElementBooleanOps child class implementing related )
#
# Where all of the named groups within the compiled regular expression are passed to __init__ of the related class.
BOOLEAN_OPS_RES = []
class BodyElementBooleanOps(BodyElement):
'''
BodyElementBooleanOps - Base comparison class for boolean comparison operations (e.x. "and" , "or" )
'''
# BOOLEAN_OP_STR - The boolean operation being implemented, should be set by the subclass.
BOOLEAN_OP_STR = 'unknown'
def doBooleanOp(self, leftSide, rightSide):
'''
doBooleanOp - Do the comparison associated with the subclass of BodyElementBooleanOps
and return the result.
            @param leftSide <BodyElementValue/bool> - Left side of boolean operator
            @param rightSide <BodyElementValue/bool> - Right side of boolean operator
            @return <BodyElementValue_Boolean> - The result of the boolean operation
'''
(leftSideValue, rightSideValue) = BodyElementBooleanOps._resolveTypesForBooleanOp(leftSide, rightSide)
return self._doBooleanOp(leftSideValue, rightSideValue)
def _doBooleanOp(self, leftSideValue, rightSideValue):
'''
_doBooleanOp - TYPE INTERNAL. Do the comparison associated with the subclass of BodyElementBooleanOp
and return the result.
This should be implemented by each comparison type, rather than doBooleanOp directly (which prepares arguments)
@param leftSideValue <str/float/other?> - Left side of comparison operator's value
@param rightSideValue <str/float/other?> - Right side of comparison operator's value
            @return <BodyElementValue_Boolean> - The result of the boolean operation
'''
raise NotImplementedError('BodyElementBooleanOps._doBooleanOp must be implemented by extending subclass, but %s does not implement!' % ( \
self.__class__.__name__,
)
)
@classmethod
def _resolveTypesForBooleanOp(cls, leftSide, rightSide):
'''
_resolveTypesForBooleanOp - Resolve the given leftSide and rightSide dynamic types for comparison
Boolean type overrides the comparison base in order to only accept booleans (instead of numeric / strings)
@param leftSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the left side of the operator.
Must be or resolve to a boolean
@param rightSide <BodyElementValue/...> - A value, either wrapped in a BodyElementValue or direct.
Represents the right side of the operator
Must be or resolve to a boolean
@return tuple(left<bool>, right<bool>)
@raises XPathRuntimeError - If either side is not a boolean, or a boolean-wrapped BodyElementValue
'''
if issubclass(leftSide.__class__, BodyElementValue):
leftSideValue = leftSide.getValue()
else:
leftSideValue = leftSide
if issubclass(rightSide.__class__, BodyElementValue):
rightSideValue = rightSide.getValue()
else:
rightSideValue = rightSide
# TODO: Provide better context here of where this operation was in the xpath string?
if not isinstance(leftSideValue, bool):
# Should this be a parse error? Their expression caused it....
raise XPathRuntimeError('XPath Runtime Error: Boolean comparison attempted ( "%s" operator ) but left side was not a boolean! Was: %s . Repr: %s' % ( \
cls.BOOLEAN_OP_STR,
type(leftSideValue).__name__,
repr(leftSideValue),
)
)
if not isinstance(rightSideValue, bool):
raise XPathRuntimeError('XPath Runtime Error: Boolean comparison attempted ( "%s" operator ) but right side was not a boolean! Was: %s . Repr: %s' % ( \
cls.BOOLEAN_OP_STR,
type(rightSideValue).__name__,
repr(rightSideValue),
)
)
return ( leftSideValue, rightSideValue )
class BodyElementBooleanOps_And(BodyElementBooleanOps):
'''
BodyElementBooleanOps_And - A BodyElementBooleanOps which represents the "and" operation -
will check that both the left and right side are True
'''
BOOLEAN_OP_STR = 'and'
def _doBooleanOp(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue and rightSideValue )
# NOTE: these require whitespace after, unlike other operators.
BEBO_AND_RE = re.compile(r'^([ \t]*[aA][nN][dD][ \t]+)')
BOOLEAN_OPS_RES.append( (BEBO_AND_RE, BodyElementBooleanOps_And) )
class BodyElementBooleanOps_Or(BodyElementBooleanOps):
'''
BodyElementBooleanOps_Or - A BodyElementBooleanOps which represents the "or" operation -
            will check that either the left or the right side is True
'''
BOOLEAN_OP_STR = 'or'
def _doBooleanOp(self, leftSideValue, rightSideValue):
return BodyElementValue_Boolean( leftSideValue or rightSideValue )
BEBO_OR_RE = re.compile(r'^([ \t]*[oO][rR][ \t]+)')
BOOLEAN_OPS_RES.append( (BEBO_OR_RE, BodyElementBooleanOps_Or) )
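# Illustrative sketch (added for clarity, not part of the original module):
#  boolean operators only accept boolean values (typically the output of a comparison).
def _exampleBooleanOps():
    '''
        _exampleBooleanOps - Demonstrate the boolean operation BodyElement types
          @return tuple< bool, bool > - ( False, True )
    '''
    andOperation = BodyElementBooleanOps_And()
    andResult = andOperation.doBooleanOp( BodyElementValue_Boolean(True), BodyElementValue_Boolean(False) )   # True and False -> False
    orOperation = BodyElementBooleanOps_Or()
    orResult = orOperation.doBooleanOp( BodyElementValue_Boolean(True), BodyElementValue_Boolean(False) )     # True or False -> True
    return ( andResult.getValue(), orResult.getValue() )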
# ALL_BODY_ELEMENT_RES - All regular expressions used in parsing out a body into individual operations
ALL_BODY_ELEMENT_RES = VALUE_GENERATOR_RES + STATIC_VALUES_RES + COMPARISON_RES + OPERATION_RES + BOOLEAN_OPS_RES
# NOTE: Static values should come before operations, so negative values match as a static value and not a subtract operation
class BodyLevel_Group(BodyLevel):
'''
BodyLevel_Group - A group of elements
'''
def __init__(self, groupMembers=None):
'''
__init__ - Create this element
@param groupMembers list<BodyElement> - Members of this group
'''
BodyLevel.__init__(self)
if not groupMembers:
groupMembers = []
self.appendBodyElements(groupMembers)
# BODY_ELEMENT_GROUP_OPEN_RE - The opening of a parenthesis group
BODY_ELEMENT_GROUP_OPEN_RE = re.compile(r'^([ \t]*[\(](?P<restOfBody>.+)[ \t]*)$')
# BODY_ELEMENT_GROUP_CLOSE_RE - The closing of a parenthesis group
BODY_ELEMENT_GROUP_CLOSE_RE = re.compile(r'^(?P<endOfGroup>[ \t]*[\)][ \t]*)')
def _parseBodyLevelGroup(restOfBody):
'''
_parseBodyLevelGroup - Parse a group, within parenthesis
@param restOfBody <str> - The remainder of the body string to parse
@return tuple< <BodyLevel_Group>, remainderStr<str> > - The group parsed, and the unused portion of the str on which to continue parsing at parent level
'''
allBodyElementREs = ALL_BODY_ELEMENT_RES
bodyElementGroupOpenRE = BODY_ELEMENT_GROUP_OPEN_RE
bodyElementGroupCloseRE = BODY_ELEMENT_GROUP_CLOSE_RE
curString = restOfBody[:].strip()
ret = []
foundCloseParen = False
while curString:
gotMatch = False
groupCloseMatch = bodyElementGroupCloseRE.match(curString)
if groupCloseMatch:
# We are at the end of this group, return the rest of the string back upward
gotMatch = True
newCurString = curString[ groupCloseMatch.span()[1] : ]
curString = newCurString
foundCloseParen = True
break
groupOpenMatch = bodyElementGroupOpenRE.match(curString)
if groupOpenMatch:
gotMatch = True
(subLevel, newCurString) = _parseBodyLevelGroup( groupOpenMatch.groupdict()['restOfBody'] )
ret.append(subLevel)
curString = newCurString
continue
else:
for ( bodyElementRE, bodyElementClass ) in allBodyElementREs:
matchObj = bodyElementRE.match(curString)
if matchObj is None:
continue
gotMatch = True
break
if gotMatch is False:
raise XPathParseError('Failed to parse body string into usable part, at: "%s"' %(curString, ))
(thisElement, newCurString) = bodyElementClass.createFromMatch(curString, matchObj)
ret.append(thisElement)
curString = newCurString
if foundCloseParen is False:
raise XPathParseError('Missing close parenthesis for section: "%s"' %(restOfBody, ))
# Optimization: Before returning, run through and perform any operations against static values possible
#newRet = _optimizeStaticValueCalculations(ret)
ret = _optimizeStaticValueCalculations(ret)
#print ( "\nPrevious BodyElements(%2d): %s\n\n New BodyElements(%2d): %s\n" %( len(ret), repr(ret), len(newRet), repr(newRet)) )
#return newRet
return ( BodyLevel_Group(ret), curString )
# BODY_ELEMENT_GROUP_FUNCTION_NEXT_ARG_RE - The next argument
BODY_ELEMENT_GROUP_FUNCTION_NEXT_ARG_RE = re.compile(r'^([ \t]*[,][ \t]*)')
def _parseFunctionArgsToBodyElements(restOfBody):
'''
_parseFunctionArgsToBodyElements - Parse function arguments into BodyElements
@param restOfBody <str> - The remainder of the body string to parse
@return tuple< list<BodyLevel_Group>, remainderStr<str> > - The groups parsed (one per arg), and the unused portion of the str on which to continue parsing at parent level
'''
allBodyElementREs = ALL_BODY_ELEMENT_RES
bodyElementGroupOpenRE = BODY_ELEMENT_GROUP_OPEN_RE
bodyElementGroupCloseRE = BODY_ELEMENT_GROUP_CLOSE_RE
bodyElementGroupFunctionNextArgRE = BODY_ELEMENT_GROUP_FUNCTION_NEXT_ARG_RE
curString = restOfBody[:].strip()
fnArgs = []
curGroupElements = []
foundCloseParen = False
while curString:
gotMatch = False
groupCloseMatch = bodyElementGroupCloseRE.match(curString)
if groupCloseMatch:
# We are at the end of this group, return the rest of the string back upward
gotMatch = True
newCurString = curString[ groupCloseMatch.span()[1] : ]
curString = newCurString
foundCloseParen = True
break
nextArgMatch = bodyElementGroupFunctionNextArgRE.match(curString)
if nextArgMatch:
# We hit a comma, should move onto the next arg
gotMatch = True
if len(curGroupElements) == 0:
# TODO: More information here?
raise XPathParseError('Function call has empty argument, at: %s' %(curString, ))
# Append the current group and begin the next
# Optimize the group elements
curGroupElements = _optimizeStaticValueCalculations(curGroupElements)
if False and len(curGroupElements) == 1:
# TODO: Support this optimization -- will require a bit of interface massaging so common interface
# We have optimized down to a single element, so add that instead of the level
fnArgs.append( curGroupElements[0] )
else:
# More than one, create a group and append it
curGroup = BodyLevel_Group( curGroupElements )
fnArgs.append( curGroup )
# TODO: Validate we don't just have trailing comma
# Create a new list for future elements
curGroupElements = []
newCurString = curString[ nextArgMatch.span()[1] : ]
curString = newCurString
continue
groupOpenMatch = bodyElementGroupOpenRE.match(curString)
if groupOpenMatch:
gotMatch = True
(subLevel, newCurString) = _parseBodyLevelGroup( groupOpenMatch.groupdict()['restOfBody'] )
curGroupElements.append( subLevel )
curString = newCurString
continue
else:
for ( bodyElementRE, bodyElementClass ) in allBodyElementREs:
matchObj = bodyElementRE.match(curString)
if matchObj is None:
continue
gotMatch = True
break
if gotMatch is False:
raise XPathParseError('Failed to parse body string into usable part, at: "%s"' %(curString, ))
(thisElement, newCurString) = bodyElementClass.createFromMatch(curString, matchObj)
curGroupElements.append( thisElement )
curString = newCurString
if foundCloseParen is False:
raise XPathParseError('Missing close parenthesis for section: "%s"' %(restOfBody, ))
if len(curGroupElements) > 0:
# Optimize the group elements
curGroupElements = _optimizeStaticValueCalculations(curGroupElements)
if False and len(curGroupElements) == 1:
# We have optimized down to a single element, so add that instead of the level
fnArgs.append( curGroupElements[0] )
else:
# More than one, create a group and append it
curGroup = BodyLevel_Group( curGroupElements )
fnArgs.append( curGroup )
# TODO: Optimize the args, can pull out of levels if only one arg
return ( fnArgs, curString )
def parseBodyStringIntoBodyElements(bodyString):
'''
parseBodyStringIntoBodyElements - Parses the body string of a tag filter expression (between square brackets)
into individual body elements.
@param bodyString <str> - A body string of an XPath expression
@return list<BodyElement> - A list of matched BodyElement items, in order of appearance.
@raises XPathParseError - Failure to parse
'''
allBodyElementREs = ALL_BODY_ELEMENT_RES
bodyElementGroupOpenRE = BODY_ELEMENT_GROUP_OPEN_RE
curString = bodyString[:].strip()
ret = []
while curString:
gotMatch = False
groupOpenMatch = bodyElementGroupOpenRE.match(curString)
if groupOpenMatch:
gotMatch = True
(subLevel, newCurString) = _parseBodyLevelGroup( groupOpenMatch.groupdict()['restOfBody'] )
ret.append(subLevel)
curString = newCurString
continue
else:
for ( bodyElementRE, bodyElementClass ) in allBodyElementREs:
matchObj = bodyElementRE.match(curString)
if matchObj is None:
continue
gotMatch = True
break
if gotMatch is False:
raise XPathParseError('Failed to parse body string into usable part, at: "%s"' %(curString, ))
(thisElement, newCurString) = bodyElementClass.createFromMatch(curString, matchObj)
ret.append(thisElement)
curString = newCurString
# Optimization: Before returning, run through and perform any operations against static values possible
#newRet = _optimizeStaticValueCalculations(ret)
ret = _optimizeStaticValueCalculations(ret)
#print ( "\nPrevious BodyElements(%2d): %s\n\n New BodyElements(%2d): %s\n" %( len(ret), repr(ret), len(newRet), repr(newRet)) )
#return newRet
return ret
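# Illustrative sketch (added for clarity, not part of the original module):
#  parsing a hypothetical body string (the portion between square brackets of an
#  XPath expression) into its ordered BodyElements.
def _exampleParseBodyString():
    '''
        _exampleParseBodyString - Demonstrate parseBodyStringIntoBodyElements on a sample body
          @return list<BodyElement> - Roughly: [ FetchAttribute, Equal, StaticValue_String, And, Position, LessThan, StaticValue_Number ]
    '''
    return parseBodyStringIntoBodyElements('@class = "header" and position() < 3')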
def _optimizeStaticValueCalculations(bodyElements):
'''
_optimizeStaticValueCalculations - Optimize element portions that can be pre-calculated
@param bodyElements - list<BodyElement> - List of BodyElements following parsing of XPath string
@return list<BodyElement> - Optimized list of BodyElements, where pre-calculated operations are ran once at parse-time
instead of per tag at run-time.
'''
numOrigElements = len(bodyElements)
if numOrigElements <= 2:
# Nothing to do
return bodyElements
# We are already going to hit __class__ on every object, so do it ahead of time
# in a quicker list comprehension, which we will reference later
bodyElementClasses = [bodyElement.__class__ for bodyElement in bodyElements]
# No benefit in checking if we have any BodyElementOperation (or future optimizations) first,
# as we will already iterate over everything. The only thing saved when none would be recreating the list,
# at the expense of O(n) vs O(2n) for the check in the event we can optimize.
ret = []
prevElement = bodyElements[0]
prevElementClass = bodyElementClasses[0]
ret.append(prevElement)
i = 1
while i < numOrigElements:
curElement = bodyElements[i]
curElementClass = bodyElementClasses[i]
if issubclass(curElementClass, (BodyElementOperation, BodyElementComparison)):
# If we have an operation to optimize, check if left and right are already values.
# If so, we can run it.
if (i+1) < numOrigElements and issubclass(prevElementClass, BodyElementValue):
# We are not on the last element, and the previous was a value.
# If next is value, run the operation.
nextElement = bodyElements[i + 1]
nextElementClass = bodyElementClasses[i + 1]
if issubclass(nextElementClass, BodyElementValue):
# Score! We can optimize!
if issubclass(curElementClass, BodyElementOperation):
calculatedValue = curElement.performOperation(prevElement, nextElement)
#elif issubclass(curElementClass, BodyElementComparison):
else:
# Only Comparison left
calculatedValue = curElement.doComparison(prevElement, nextElement)
# Strip off the previous value, and replace this operation and next value with calculated
ret = ret[ : -1 ] + [calculatedValue]
# Set previous value to this value
prevElement = calculatedValue
prevElementClass = prevElement.__class__
# And increment past the next element
i += 2
continue
# No optimization available, add the element as-is
ret.append(curElement)
# Update previous element to this element for next round
prevElement = curElement
prevElementClass = curElementClass
# Increment to next element
i += 1
return ret
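# Illustrative sketch (added for clarity, not part of the original module):
#  when both sides of an operation/comparison are already static values, the
#  optimizer folds the result at parse time rather than per-tag at run time.
def _exampleStaticValueFolding():
    '''
        _exampleStaticValueFolding - Demonstrate parse-time folding of static values
          @return list<BodyElement> - A single BodyElementValue_Boolean holding True, since "1 + 2 = 3" is fully static
    '''
    return parseBodyStringIntoBodyElements('1 + 2 = 3')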
# vim: set ts=4 sw=4 st=4 expandtab :
|
lgpl-3.0
| -9,045,549,128,452,777,000
| 33.874741
| 258
| 0.621279
| false
| 4.3537
| false
| false
| false
|
dcos/dcos
|
packages/adminrouter/extra/src/test-harness/modules/mocker/common.py
|
1
|
7971
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""
Shared management code for DC/OS mocks used by AR instances, both EE and Open.
"""
import concurrent.futures
import logging
from mocker.endpoints.marathon import MarathonEndpoint
from mocker.endpoints.mesos import MesosEndpoint
from mocker.endpoints.mesos_dns import MesosDnsEndpoint
from mocker.endpoints.reflectors import (
ReflectingTcpIpEndpoint,
ReflectingUnixSocketEndpoint,
)
log = logging.getLogger(__name__)
class MockerBase:
"""This class represents mocking behaviour shared between both EE and Open
repositories.
It should not be instantiated directly but instead inheriting classes should
    override/extend its methods.
"""
_endpoints = None
def _register_endpoints(self, endpoints):
"""Register given endpoints list with the mock
This method registers all the endpoints that are going to be managed
by this Mocker instance.
Args:
endpoints (object: [EndpointA, EndpointB,...]): list of endpoints
that should be registered
"""
self._endpoints = {}
for endpoint in endpoints:
log.info("Registering endpoint `%s`", endpoint.id)
assert endpoint.id not in self._endpoints
self._endpoints[endpoint.id] = endpoint
@staticmethod
def _create_common_endpoints():
"""Helper function that takes care of creating/instantiating all the
endpoints that are common for both EE and Open repositories"""
res = []
# pkgpanda endpoint
res.append(ReflectingUnixSocketEndpoint('/run/dcos/pkgpanda-api.sock'))
# exhibitor
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=8181))
# Mesos masters
res.append(MesosEndpoint(ip='127.0.0.2', port=5050))
res.append(MesosEndpoint(ip='127.0.0.3', port=5050))
# Marathon instances running on the masters
res.append(MarathonEndpoint(ip='127.0.0.1', port=8080))
res.append(MarathonEndpoint(ip='127.0.0.2', port=8080))
# cosmos
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=7070))
# dcos-net
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=62080))
# Mesos agents:
# - plain/without TLS
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=15001))
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=15002))
# - TLS version. It's used for testing e.g. DEFAULT_SCHEME variable
# where AR is connecting to the upstream Mesos Agent using TLS.
# 127.0.0.1 address stems from certificate names matching.
res.append(ReflectingTcpIpEndpoint(
ip='127.0.0.1',
port=15401,
certfile='/run/dcos/pki/tls/certs/adminrouter-ec.crt',
keyfile='/run/dcos/pki/tls/private/adminrouter-ec.key'))
# Agent3
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.4', port=15003))
# Agent AR 1
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=61001))
# Agent AR 2
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=61001))
# task /scheduler-alwaysthere
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=16000))
# task /nest1/scheduler-alwaysthere
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=17000))
# task /nest2/nest1/scheduler-alwaysthere
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18000))
# task /nest2/nest1/scheduler-onlymarathon
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18001))
# task /nest2/nest1/scheduler-onlymesos
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18002))
# task /nest2/nest1/scheduler-onlymesosdns
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18003))
# task /scheduler-alwaysthere but with different ip+port, used i.e. in
# `/service` endpoint tests
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.15', port=16001))
# catch-all for /scheduler-alwaysthere task. Its role is to respond for all
# the requests which i.e. used mesos_dns'es second entry in SRV reply.
        # Successful tests will never use it.
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=16002))
# other Admin Router Masters, used i.e. during Marathon leader testing
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=80))
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=80))
res.append(ReflectingTcpIpEndpoint(
ip='127.0.0.4',
port=443,
certfile='/run/dcos/pki/tls/certs/adminrouter-ec.crt',
keyfile='/run/dcos/pki/tls/private/adminrouter-ec.key'))
# metrics endpoint
res.append(ReflectingUnixSocketEndpoint('/run/dcos/telegraf-dcos-metrics.sock'))
# log endpoint
res.append(ReflectingUnixSocketEndpoint('/run/dcos/dcos-log.sock'))
# Mesos DNS
res.append(MesosDnsEndpoint(ip='127.0.0.1', port=8123))
# DDDT, two variants:
# TODO (prozlach): cleanup DDDT sockets
res.append(
ReflectingTcpIpEndpoint(ip='127.0.0.1', port=1050))
res.append(
ReflectingUnixSocketEndpoint('/run/dcos/dcos-diagnostics.sock'))
# DC/OS Metronome
res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=9000))
# Checks API
res.append(
ReflectingUnixSocketEndpoint('/run/dcos/dcos-checks-api.sock'))
# TODO - other endpoints common for all flavours go here...
return res
def __init__(self, extra_endpoints=None):
"""Initialize new MockerBase instance
Args:
extra_endpoints (obj: [EndpointA, EndpointB,...]): list of endpoints
that are unique to the inheriting class/represent specific behaviour
of given flavour
"""
        common_endpoints = self._create_common_endpoints()
        endpoints = common_endpoints + (extra_endpoints or [])
self._register_endpoints(endpoints)
def start(self):
"""Start all endpoints registered with this Mocker instance"""
with concurrent.futures.ThreadPoolExecutor() as executor:
for endpoint in self._endpoints.values():
executor.submit(endpoint.start)
def stop(self):
"""Stop all endpoints registered with this Mocker instance.
Usually called right before object destruction
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
for endpoint in self._endpoints.values():
executor.submit(endpoint.stop)
def reset(self):
"""Reset all the endpoints to their initial state
        Used to make sure that all the tests start with a fresh state and are
        not interfering with each other through Mocker
"""
for endpoint in self._endpoints.values():
endpoint.reset()
def send_command(self, endpoint_id, func_name, aux_data=None):
"""Reconfigure endpoint manager by Mocker
This method reconfigures endpoint previously started by Mocker. The
reconfiguration is basically calling method `func_name` belonging to
endpoint `endpoint_id` with data `aux_data`
Args:
endpoint_id (str): id of the endpoint to reconfigure
func_name (str): name of the endpoint's function to call
            aux_data (str): auxiliary data to pass to the function
Returns:
            Depends on the endpoint - it returns whatever the given endpoint returns.
        Raises:
            KeyError: an endpoint with the given id does not exist
            AttributeError: the endpoint does not define function `func_name`
"""
endpoint = self._endpoints[endpoint_id]
f = getattr(endpoint, func_name)
return f(aux_data)
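# Usage sketch (not part of the original module; the endpoint id and function
# name below are purely illustrative -- real values depend on the endpoints
# registered in _create_common_endpoints() and on the concrete flavour):
#
#   mocker = SomeFlavourMocker(extra_endpoints=[])   # concrete subclass of this base
#   mocker.start()
#   mocker.send_command(endpoint_id='http://127.0.0.1:8080',
#                       func_name='record_requests',
#                       aux_data=None)
#   # ... issue requests against Admin Router and assert on the results ...
#   mocker.reset()
#   mocker.stop()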
|
apache-2.0
| -4,933,649,222,516,970,000
| 40.952632
| 88
| 0.648476
| false
| 3.763456
| false
| false
| false
|
gnachman/iTerm2
|
api/library/python/iterm2/iterm2/variables.py
|
1
|
3098
|
"""
Provides support for iTerm2 variables, which hold information associated
with various objects such as sessions, tabs, and windows.
"""
import asyncio
import enum
import json
import typing
import iterm2.connection
import iterm2.notifications
class VariableScopes(enum.Enum):
"""Describes the scope in which a variable can be evaluated."""
SESSION = iterm2.api_pb2.VariableScope.Value("SESSION") #: Session scope
TAB = iterm2.api_pb2.VariableScope.Value("TAB") #: Tab scope
WINDOW = iterm2.api_pb2.VariableScope.Value("WINDOW") #: Window scope
APP = iterm2.api_pb2.VariableScope.Value("APP") #: Whole-app scope
class VariableMonitor:
"""
Watches for changes to a variable.
`VariableMonitor` is a context manager that helps observe changes in iTerm2
Variables.
:param connection: The connection to iTerm2.
:param scope: The scope in which the variable should be evaluated.
:param name: The variable name.
:param identifier: A tab, window, or session identifier. Must correspond to
the passed-in scope. If the scope is `APP` this should be None. If the
scope is `SESSION` or `WINDOW` the identifier may be "all" or "active".
.. seealso::
* Example ":ref:`colorhost_example`"
* Example ":ref:`theme_example`"
Example:
.. code-block:: python
async with iterm2.VariableMonitor(
connection,
iterm2.VariableScopes.SESSION,
"jobName",
my_session.session_id) as mon:
while True:
new_value = await mon.async_get()
DoSomething(new_value)
"""
def __init__(
self,
connection: iterm2.connection.Connection,
scope: VariableScopes,
name: str,
identifier: typing.Optional[str]):
self.__connection = connection
self.__scope = scope
self.__name = name
self.__identifier = identifier
self.__token = None
self.__queue: asyncio.Queue = asyncio.Queue(
loop=asyncio.get_event_loop())
async def __aenter__(self):
async def callback(_connection, message):
"""Called when a variable changes."""
await self.__queue.put(message)
self.__token = await (
iterm2.notifications.
async_subscribe_to_variable_change_notification(
self.__connection,
callback,
self.__scope.value,
self.__name,
self.__identifier))
return self
async def async_get(self) -> typing.Any:
"""Returns the new value of the variable."""
result = await self.__queue.get()
json_new_value = result.json_new_value
return json.loads(json_new_value)
async def __aexit__(self, exc_type, exc, _tb):
try:
await iterm2.notifications.async_unsubscribe(
self.__connection, self.__token)
except iterm2.notifications.SubscriptionException:
pass
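# Usage sketch from outside this module (assumes the public iterm2 entry points
# `run_until_complete` and `async_get_app`; the session lookup is simplified
# for illustration):
#
#   async def main(connection):
#       app = await iterm2.async_get_app(connection)
#       session = app.current_terminal_window.current_tab.current_session
#       async with VariableMonitor(
#               connection,
#               VariableScopes.SESSION,
#               "jobName",
#               session.session_id) as mon:
#           while True:
#               print(await mon.async_get())
#
#   iterm2.run_until_complete(main)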
|
gpl-2.0
| -8,994,132,710,832,068,000
| 31.957447
| 79
| 0.603938
| false
| 4.314763
| false
| false
| false
|
breuderink/psychic
|
psychic/nodes/filter.py
|
1
|
1820
|
import numpy as np
from scipy import signal
from golem import DataSet
from golem.nodes import BaseNode
from psychic.utils import get_samplerate
class Filter(BaseNode):
def __init__(self, filt_design_func):
'''
Forward-backward filtering node. filt_design_func is a function that takes
the sample rate as an argument, and returns the filter coefficients (b, a).
'''
BaseNode.__init__(self)
self.filt_design_func = filt_design_func
def train_(self, d):
fs = get_samplerate(d)
self.log.info('Detected sample rate of %d Hz' % fs)
self.filter = self.filt_design_func(fs)
def apply_(self, d):
b, a = self.filter
xs = np.hstack([signal.filtfilt(b, a, d.xs[:, i]).reshape(-1, 1)
for i in range(d.nfeatures)])
return DataSet(xs=xs, default=d)
class OnlineFilter(Filter):
def __init__(self, filt_design_func):
Filter.__init__(self, filt_design_func)
self.zi = []
def apply_(self, d):
b, a = self.filter
if self.zi == []:
self.zi = [signal.lfiltic(b, a, np.zeros(b.size)) for fi in
range(d.nfeatures)]
new_zi = []
xs = []
for i in range(d.nfeatures):
xi, zii = signal.lfilter(b, a, d.xs[:, i], zi=self.zi[i])
xs.append(xi.reshape(-1, 1))
new_zi.append(zii)
self.zi = new_zi
return DataSet(xs=np.hstack(xs), default=d)
class Winsorize(BaseNode):
def __init__(self, cutoff=[.05, .95]):
self.cutoff = np.atleast_1d(cutoff)
assert self.cutoff.size == 2
BaseNode.__init__(self)
def train_(self, d):
assert len(d.feat_shape) == 1
self.lims = np.apply_along_axis(lambda x: np.interp(self.cutoff,
np.linspace(0, 1, d.ninstances), np.sort(x)), 0, d.xs)
def apply_(self, d):
    return DataSet(xs=np.clip(d.xs, self.lims[0, :], self.lims[1, :]),
default=d)
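# Example filter design function (a sketch; the 8-30 Hz band and filter order
# are arbitrary choices, and the framework normally drives train_/apply_ itself):
#
#   from scipy import signal
#
#   def bandpass_8_30hz(fs):
#       # 4th-order Butterworth band-pass, cutoffs normalized to Nyquist
#       return signal.butter(4, [8.0 / (fs / 2.0), 30.0 / (fs / 2.0)], btype='band')
#
#   node = Filter(bandpass_8_30hz)        # or OnlineFilter(bandpass_8_30hz)
#   node.train_(d_train)                  # d_train/d_test are golem DataSets
#   d_filtered = node.apply_(d_test)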
|
bsd-3-clause
| 3,304,547,324,791,819,000
| 28.836066
| 79
| 0.621978
| false
| 3.0033
| false
| false
| false
|
Ferjapolis/Indexador-MongoDB
|
sv03ToMongo.py
|
1
|
2461
|
# -*- coding: cp1252 -*-
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
import os
import sys
from PyPDF2 import PdfFileReader
enc = sys.stdin.encoding
c = MongoClient("localhost",27017)
print ("Conectado")
dbh = c["sv03"]
errores = dbh["errores"]
def carga(tabla,carpeta):
collections = dbh[tabla]
listaB= [""]
rootdir = r"\\sv-03\QA12\GENERAL\PROYECTOS\\"+str(carpeta)
for path, dirs, files in os.walk(rootdir):
for fil in files:
ruta = str(path)+"\\"+str(fil)
search = collections.find_one({"path":ruta})
if search is None:
try:
datos = {}
statinfo = os.stat(ruta)
datos["size"] = statinfo.st_size
datos["path"] = ruta
ext = fil.lower().rsplit('.', 1)[-1]
extension = "."+ext
datos["name"] = fil.replace(extension,"")
datos["format"] = ext
if ext =="pdf":
try:
Formatos = []
Point = 0.0353
pdf = PdfFileReader(open(ruta,'rb'))
num_pages = int(pdf.getNumPages())
datos["pag"]=num_pages
for i in range(0, num_pages):
hoja = pdf.getPage(i).mediaBox.upperRight
ancho = int(int(hoja[0])*Point)
largo = int(int(hoja[1])*Point)
formato = str(ancho)+"x"+str(largo)
Formatos.append(formato)
except:
datos["pag"]="Error"
hojas = {}
for elemn in Formatos:
                            if elemn not in hojas:
                                hojas[elemn] = 1
                            else:
                                hojas[elemn] = hojas[elemn] + 1
datos["pagF"] = hojas
if not datos["name"] in listaB:
collections.insert_one(datos)
except:
falla = {"path":ruta}
errores.insert_one(falla)
else:
"error"
print("cargado")
|
unlicense
| 5,193,583,938,490,873,000
| 37.453125
| 76
| 0.407152
| false
| 4.250432
| false
| false
| false
|
thelabnyc/django-oscar-wfrs
|
src/wellsfargo/tests/connector/test_client.py
|
1
|
2934
|
from urllib.parse import parse_qs
from django.core.cache import cache
from django.test import TestCase
from wellsfargo.connector.client import WFRSGatewayAPIClient
import requests_mock
class WFRSGatewayAPIClientTest(TestCase):
def setUp(self):
super().setUp()
cache.clear()
@requests_mock.Mocker()
def test_get_api_key(self, rmock):
call_count = {
"i": 0,
}
# Setup mock for generating a token
def match_request(request):
# Check auth header
self.assertTrue(request.headers["Authorization"].startswith("Basic "))
# Check data in body
data = parse_qs(request.body)
self.assertEqual(
data,
{
"grant_type": [
"client_credentials",
],
"scope": [
" ".join(
[
"PLCCA-Prequalifications",
"PLCCA-Applications",
"PLCCA-Payment-Calculations",
"PLCCA-Transactions-Authorization",
"PLCCA-Transactions-Charge",
"PLCCA-Transactions-Authorization-Charge",
"PLCCA-Transactions-Return",
"PLCCA-Transactions-Cancel-Authorization",
"PLCCA-Transactions-Void-Return",
"PLCCA-Transactions-Void-Sale",
"PLCCA-Transactions-Timeout-Authorization-Charge",
"PLCCA-Transactions-Timeout-Return",
"PLCCA-Account-Details",
]
),
],
},
)
# Increment call count
call_count["i"] += 1
return True
# Register request mock
rmock.post(
"https://api-sandbox.wellsfargo.com/token",
additional_matcher=match_request,
json={
"access_token": "16a05f65dd41569af67dbdca7ea4da4d",
"scope": "",
"token_type": "Bearer",
"expires_in": 79900,
},
)
self.assertEqual(call_count["i"], 0)
# Get a token
token = WFRSGatewayAPIClient().get_api_key()
self.assertEqual(token.api_key, "16a05f65dd41569af67dbdca7ea4da4d")
self.assertEqual(token.is_expired, False)
self.assertEqual(call_count["i"], 1)
# Get token again
token = WFRSGatewayAPIClient().get_api_key()
self.assertEqual(token.api_key, "16a05f65dd41569af67dbdca7ea4da4d")
self.assertEqual(token.is_expired, False)
self.assertEqual(call_count["i"], 1)
|
isc
| 9,074,675,246,136,069,000
| 35.675
| 82
| 0.480913
| false
| 4.493109
| false
| false
| false
|
ivannz/study_notes
|
data_study/facebook2012/base.py
|
1
|
2045
|
# -*- coding: UTF-8 -*-
## Base modules
import scipy.sparse as sp
import numpy as np
from collections import deque
import pandas as pd
## Read a sparse adjacency matrix from a two-column CSV file
def __csr_from_csv( file_name, **kwargs ) :
return __csr_from_pandas( pd.read_csv( file_name, **kwargs ) )
## Creates a sparse matrix from a two-column source-destination dataframe
def __csr_from_pandas( df, symmetrize = False ) :
return __csr_from_endpoints( df.values[ :, 0 ],
df.values[ :, 1 ], symmetrize = symmetrize )
def __csr_from_endpoints( u, v, symmetrize = False ) :
assert( len( u ) == len( v ) )
## Convert to a COO matrix
if not symmetrize :
adj = sp.coo_matrix( ( np.ones( len( u ), dtype = np.float ), ( u, v ) ) )
else :
adj = sp.coo_matrix( ( np.ones( len( u ) + len( v ), dtype = np.float ),
( np.concatenate( ( u, v ) ), np.concatenate( ( v, u ) )) ) )
## Convert to CSR and remove duplicates
adj = adj.tocsr( ) ; adj.data[ : ] = 1
return adj
def __sparse_bfs( A, sources, num_nodes = np.inf, max_hops = np.inf ) :
sources = np.asarray( sources, np.int )
## Initialize the hops array
dist = np.full( A.shape[ 0 ], np.inf, np.float )
    ## The source is immediately reachable
dist[ sources ] = 0.0
## Setup the vertex traversal schedule.
Q = deque( sources )
## Setup the counter of visited nodes
num_visited = 0
    ## Traverse until the queue empties, or the allotted number of nodes is exceeded.
while Q :
## Get the current vertex from the top of the FIFO queue
v = Q.popleft( )
        ## ... find its neighbours (A is CSR)
N = A[ v, : ].nonzero( )[ 1 ]
## ... keep those that were not visited
N = N[ np.isinf( dist[ N ] ) ]
        ## Add them to the queue
if len( N ) > 0 :
dist[ N ] = 1.0 + dist[ v ]
## Nodes farther than max_hops away from the sources are not traversed.
if 1.0 + dist[ v ] < max_hops :
Q.extend( N )
## Unless the current vertex is the source, increase the number of visited nodes.
if dist[ v ] > 0 :
num_visited += len( N )
if num_visited >= num_nodes :
break
return dist
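# Quick sanity sketch (toy graph, not part of the original module):
#
#   u = np.array([0, 0, 1, 2]); v = np.array([1, 2, 2, 3])
#   adj = __csr_from_endpoints(u, v, symmetrize=True)
#   __sparse_bfs(adj, sources=[0])
#   # -> array([0., 1., 1., 2.])   vertex 3 is two hops away from vertex 0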
|
mit
| 3,795,678,255,961,516,000
| 32.52459
| 81
| 0.636186
| false
| 2.942446
| false
| false
| false
|
WilJoey/ckanext-tnext
|
ckanext/tnext/controllers/MUser.py
|
1
|
9024
|
import ckan.plugins as p
#from ckan.lib.base import BaseController, config
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.new_authz as new_authz
import ckan.lib.captcha as captcha
import ckan.lib.navl.dictization_functions as dictization_functions
from pylons import config
from ckan.common import _, c, g, request
c = base.c
request = base.request
class MUserController(base.BaseController):
def index (self):
LIMIT = 20
page = int(request.params.get('page', 1))
c.q = request.params.get('q', '')
c.order_by = request.params.get('order_by', 'name')
context = {'return_query': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'q': c.q,
'limit': LIMIT,
'offset': (page - 1) * LIMIT,
'order_by': c.order_by}
try:
logic.check_access('user_list', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
users_list = logic.get_action('user_list')(context, data_dict)
c.users = users_list
c.page = h.Page(
collection=users_list,
page=page,
url=h.pager_url,
item_count=users_list.count(),
items_per_page=LIMIT
)
return base.render('muser/index.html')
def new (self, data=None, errors=None, error_summary=None):
#q = model.Session.query(model.User).filter(model.User.sysadmin==True)
#c.sysadmins = [a.name for a in q.all()]
'''GET to display a form for registering a new user.
or POST the form data to actually do the user registration.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj,
'schema': self._new_form_to_db_schema(),
'save': 'save' in request.params}
c.is_sysadmin = new_authz.is_sysadmin(c.user)
if not c.user or not c.is_sysadmin:
return base.render('user/logout_first.html')
try:
logic.check_access('user_create', context)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create a user'))
if context['save'] and not data:
return self._save_new(context)
c.data = data or {}
c.errors = errors or {}
c.error_summary = error_summary or {}
#vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
#c.form = render(self.new_user_form, extra_vars=vars)
#return render('user/new.html')
return base.render('muser/new.html')
def _new_form_to_db_schema(self):
return schema.user_new_form_schema()
def _save_new(self, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
captcha.check_recaptcha(request)
user = logic.get_action('user_create')(context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create user %s') % '')
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except captcha.CaptchaError:
error_msg = _(u'Bad Captcha. Please try again.')
h.flash_error(error_msg)
return self.new(data_dict)
except logic.ValidationError, e:
c.errors = e.error_dict
c.error_summary = e.error_summary
return self.new(data_dict, c.errors, c.error_summary)
# success
h.flash_success(_('User "%s" is now registered.') % (data_dict['name']))
#return base.render('user/logout_first.html')
return base.render('muser/new.html')
def edit(self, id=None, data=None, errors=None, error_summary=None):
context = {'save': 'save' in request.params,
'schema': self._edit_form_to_db_schema(),
'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj
}
if id is None:
base.abort(400, _('No user specified'))
if not new_authz.is_sysadmin(c.user):
base.abort(401, _('User %s not authorized to edit %s') % (str(c.user), id))
data_dict = {'id': id}
try:
logic.check_access('user_update', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit a user.'))
if (context['save']) and not data:
return self._save_edit(id, context)
try:
old_data = logic.get_action('user_show')(context, data_dict)
schema = self._db_to_edit_form_schema()
if schema:
old_data, errors = validate(old_data, schema)
c.display_name = old_data.get('display_name')
c.user_name = old_data.get('name')
data = data or old_data
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % '')
except logic.NotFound:
base.abort(404, _('User not found'))
user_obj = context.get('user_obj')
errors = errors or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
self._setup_template_variables({'model': model,
'session': model.Session,
'user': c.user or c.author},
data_dict)
c.is_myself = True
c.show_email_notifications = h.asbool(
config.get('ckan.activity_streams_email_notifications'))
c.form = base.render('muser/edit_user_form.html', extra_vars=vars)
return base.render('muser/edit.html')
def _save_edit(self, id, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
# MOAN: Do I really have to do this here?
if 'activity_streams_email_notifications' not in data_dict:
data_dict['activity_streams_email_notifications'] = False
user = logic.get_action('user_update')(context, data_dict)
h.flash_success(_('Profile updated'))
user_index = h.url_for(controller='ckanext.tnext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % id)
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except logic.ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
def _setup_template_variables(self, context, data_dict):
c.is_sysadmin = new_authz.is_sysadmin(c.user)
try:
user_dict = logic.get_action('user_show')(context, data_dict)
except logic.NotFound:
base.abort(404, _('User not found'))
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
c.user_dict = user_dict
c.is_myself = user_dict['name'] == c.user
c.about_formatted = h.render_markdown(user_dict['about'])
def _db_to_edit_form_schema(self):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
def _edit_form_to_db_schema(self):
return schema.user_edit_form_schema()
def delete(self, id):
'''Delete user with id passed as parameter'''
context = {'model': model,
'session': model.Session,
'user': c.user,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
logic.get_action('user_delete')(context, data_dict)
h.flash_success(_('User deleted!'))
user_index = h.url_for(controller='ckanext.tnext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
msg = _('Unauthorized to delete user with id "{user_id}".')
base.abort(401, msg.format(user_id=id))
|
mit
| -4,200,688,493,169,835,500
| 36.915966
| 112
| 0.566379
| false
| 3.802781
| false
| false
| false
|
newtrino/vertigo
|
tests/test7.py
|
1
|
1747
|
#!/usr/bin/python
""" This will be a test for rave's two-jet feature """
width=0.0015
length=5.3
from vertigo import RaveVertexFactory, EventFactory, RaveConstantMagneticField, \
RaveVacuumPropagator, LoopSettings, RaveTrackContainer, WeightedRaveTrack, \
ObserverManager_Instance, RaveCovariance3D, RaveEllipsoid3D, RavePoint3D
c=RaveCovariance3D( width**2, 0,0,width**2,0,length**2 )
e=RaveEllipsoid3D( RavePoint3D(),c )
LoopSettings.Instance().setVerbosity(0)
ravefactory=RaveVertexFactory ( RaveConstantMagneticField(0.,0.,4.), RaveVacuumPropagator(), e, "avr-primcut:3.0" )
eventfactory=EventFactory ( "bjets.170.1.txt.gz" )
event=eventfactory.next()
for simvtx in event.simVertices():
print simvtx
print len(event.jets()),"jets in event."
print len(event.tracks()),"tracks in event."
primaries=RaveTrackContainer()
first=True
secondaries=None
for jet in event.jets():
tracks=jet.tracks()
print len(tracks), "tracks in jet."
  if not first: # put all tracks except those of the first jet in "primaries"
for track in tracks:
primaries.append ( track )
else:
secondaries=tracks
first=False
vertices=ravefactory.create ( event.tracks(), True )
print len(vertices),"vertices with all tracks"
for vtx in vertices:
print "Vtx Pos: (%.4f, %.4f, %.4f)" % (vtx.position().x(),vtx.position().y(),vtx.position().z() )
print len(primaries),"primary tracks."
vertices=ravefactory.create ( primaries, secondaries, True )
# vertices=ravefactory.create ( primaries, True )
print len(vertices),"vertices with all tracks"
for vtx in vertices:
print "Vtx Pos: (%.4f, %.4f, %.4f)" % (vtx.position().x(),vtx.position().y(),vtx.position().z() )
# obs=ObserverManager_Instance().get("EventPrinter")
# obs.process(event)
|
gpl-2.0
| -6,821,613,297,838,847,000
| 31.351852
| 115
| 0.721809
| false
| 2.951014
| false
| false
| false
|
rubik/poly
|
tests/test_poly.py
|
1
|
9508
|
import copy
import unittest
from poly import Poly, monomial
def pass_to(func, convert=(True, True), star=False):
def wrapper(meth):
def inner(self):
for value, expected in meth(self).items():
if convert[0] and not star:
value = Poly(value)
if convert[1]:
if isinstance(expected, tuple):
expected = tuple(map(Poly, expected))
else:
expected = Poly(expected)
val = func(*map(Poly, value)) if star else func(value)
self.assertEqual(val, expected)
return inner
return wrapper
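# Roughly, a decorated test below returns an {input: expected} mapping and
# pass_to() turns every pair into an assertEqual.  For instance, the
# test_degree entry ((-3, 2), (4, 0)): 2 is checked (approximately) as
#
#   self.assertEqual(Poly(((-3, 2), (4, 0))).degree, 2)
#
# with `star=True` variants unpacking the key into several Poly arguments.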
class TestPypolFuncs(unittest.TestCase):
def test_monomial(self):
self.assertEqual(monomial(1, 1), Poly([(1, 1)]))
self.assertEqual(monomial(-1, 0), Poly([(-1, 0)]))
self.assertEqual(monomial(0, 0), Poly([]))
self.assertEqual(monomial(1, 2), Poly([(1, 2)]))
@pass_to(Poly.from_string, (False, True))
def test_parse(self):
return {
'3x - 2': [(3, 1), (-2, 0)],
'x + 1': [(1, 1), (1, 0)],
'4x**2 + x - 1': [(4, 2), (1, 1), (-1, 0)],
'-2x^3 + x**2 -x + 1': [(-2, 3), (1, 2), (-1, 1), (1, 0)],
'- x ^ 3 + 2': [(-1, 3), (2, 0)],
'4 x': [(4, 1)],
'- 5 x ^ 3 + 1 - 4': [(-5, 3), (-3, 0)],
'-x - x^2': [(-1, 2), (-1, 1)],
'x + x - 3x': [(-1, 1)],
}
class TestPypolPoly(unittest.TestCase):
@pass_to(Poly.__repr__, (True, False))
def test_repr(self):
return {
((1, 2), (4, 1), (-2, 0)): '+ x^2 + 4x - 2',
((-3, 4), (-1, 2)): '- 3x^4 - x^2',
((-2, 2), (3, 1)): '- 2x^2 + 3x',
((2, 0),): '+ 2',
((1, 1),): '+ x',
((-1, 10),): '- x^10',
(): '0',
((-1, 0),): '- 1'
}
@pass_to(Poly.degree.fget, (True, False))
def test_degree(self):
return {
((-3, 2), (4, 0)): 2,
((4, 3), (0, 5), (0, 7), (9, 2)): 3,
((-1, 0),): 0,
((3, 2), (4, 1)): 2,
(): 0,
}
@pass_to(Poly.rhs.fget, (True, False))
def test_rhs(self):
return {
((-3, 4), (4, 2)): 0,
((-1, 0),): -1,
((9, 0), (-3, 2), (4, 2), (-5, 1)): 9,
((2, 2), (0, 0)): 0,
}
@pass_to(Poly.append, star=True)
def test_append(self):
return {
(((2, 3), (-3, 4)), ((1, 4), (2, 2))): [(-2, 4), (2, 3), (2, 2)],
(((-2, 3), (1, 2), (1, 1)), ((3, 2),)): [(-2, 3), (4, 2), (1, 1)],
(((3, 1),), ((-5, 1),)): [(-2, 1)],
(((4, 2), (-1, 1)), ()): [(4, 2), (-1, 1)],
}
@pass_to(Poly.is_num, (True, False))
def test_is_num(self):
return {
((-2, 0),): True,
((9, 9), (0, 4)): False,
((1, 1), (1, 0)): False,
((0, 0),): True,
}
@pass_to(Poly.simplify, (False, False))
def test_simplify(self):
return {
((1, 2), (3, 0), (-1, 0)): [(1, 2), (2, 0)],
((-3, 2), (-4, 2), (0, 4), (-2, 1)): [(-7, 2), (-2, 1)],
((0, 2),): [],
((4, 4), (-4, 4)): [],
((2, 1), (-8, 0)): [(2, 1), (-8, 0)]
}
@pass_to(copy.copy)
def test_copy(self):
return {
((1, 4), (-1, 0)): [(1, 4), (-1, 0)],
((-1, 2), (2, 3), (4, 1)): [(2, 3), (-1, 2), (4, 1)],
((3, 2),): [(3, 2)],
}
@pass_to(copy.deepcopy)
def test_deepcopy(self):
return {
((1, 4), (-1, 0)): [(1, 4), (-1, 0)],
((-1, 2), (2, 3), (4, 1)): [(2, 3), (-1, 2), (4, 1)],
((3, 2),): [(3, 2)],
}
def test_getitem(self):
self.assertEqual(Poly([(1, 2), (-1, 0)])[0], Poly([(1, 2)]))
self.assertEqual(Poly([(-3, 0), (4, 4)])[0], Poly([(4, 4)]))
self.assertEqual(Poly([(1, 1), (2, 0), (3, 2)])[1:],
Poly([(1, 1), (2, 0)]))
self.assertEqual(Poly([(-2, 3), (1, 2), (-1, 0)])[2:3],
Poly([(-1, 0)]))
@pass_to(Poly.__nonzero__, (True, False))
def test_nonzero(self):
return {
(): False,
((1, 0),): True,
((0, 0),): False,
((1, 1), (-3, 1), (4, 2)): True,
}
@pass_to(Poly.__bool__, (True, False))
    def test_bool(self):
return {
(): False,
((1, 0),): True,
((0, 0),): False,
((1, 1), (-3, 1), (4, 2)): True,
}
@pass_to(len, (True, False))
def test_len(self):
return {
(): 0,
((0, 0),): 0,
((1, 0),): 1,
((1, 4), (-1, 4), (1, 1)): 1,
((3, 2), (4, 1)): 2,
((1, 4), (-1, 3), (1, 2), (-1, 1), (1, 0)): 5
}
@pass_to(Poly.__eq__, (True, False), True)
def test_eq(self):
return {
(((1, 3), (-1, 2)), ((1, 3), (2, 2), (-3, 2))): True,
(((1, 3), (4, 2)), ((1, 3), (-4, 2))): False,
(((1, 0),), ((1, 0),)): True,
((), ()): True,
}
@pass_to(Poly.__ne__, (True, False), True)
def test_ne(self):
return {
(((1, 3), (-1, 2)), ((1, 3), (2, 2), (-3, 2))): False,
(((1, 3), (4, 2)), ((1, 3), (-4, 2))): True,
(((1, 0),), ((1, 0),)): False,
((), ()): False,
}
@pass_to(Poly.__pos__)
def test_pos(self):
return {
(): [],
((1, 0), (-1, 1)): [(1, 0), (-1, 1)],
((3, 2), (-3, 2), (4, 1)): [(4, 1)],
}
@pass_to(Poly.__neg__)
def test_neg(self):
return {
((1, 1),): [(-1, 1)],
((2, 4), (-3, 5)): [(3, 5), (-2, 4)],
((3, 1), (1, 1)): [(-4, 1)],
((1, 1),): [(-1, 1)],
}
@pass_to(Poly.__add__, star=True)
def test_add(self):
return {
(((3, 2), (4, 1)), ((1, 2), (-1, 1))): [(4, 2), (3, 1)],
(((1, 2), (3, 3)), ((2, 4), (-1, 3))): [(2, 4), (2, 3), (1, 2)],
(((3, 3),), ((-3, 3),)): [],
(((1, 1), (-2, 4)), ((3, 1), (2, 4))): [(4, 1)],
((), ((-3, 2),)): [(-3, 2)],
}
@pass_to(Poly.__sub__, star=True)
def test_sub(self):
return {
(((3, 2), (4, 1)), ((1, 2), (-1, 1))): [(2, 2), (5, 1)],
(((1, 2), (3, 3)), ((2, 4), (3, 3))): [(-2, 4), (1, 2)],
(((3, 3),), ((-3, 3),)): [(6, 3)],
(((1, 1), (-2, 4)), ((3, 1), (2, 4))): [(-4, 4), (-2, 1)],
((), ((-3, 2),)): [(3, 2)],
}
@pass_to(Poly.__mul__, star=True)
def test_mul(self):
return {
(((1, 1), (-1, 0)), ((1, 1), (-1, 0))): [(1, 2), (-2, 1), (1, 0)],
(((1, 0),), ((2, 3), (-1, 4))): [(-1, 4), (2, 3)],
(((-1, 1),), ((2, 3), (-1, 4))): [(1, 5), (-2, 4)]
}
@pass_to(divmod, star=True)
def test_divmod(self):
return {
(((3, 3), (-2, 2), (4, 1), (-3, 0)), ((1, 2), (3, 1), (3, 0))):
([(3, 1), (-11, 0)], [(28, 1), (30, 0)]),
(((1, 3), (-2, 2), (1, 1), (-5, 0)), ((-1, 1), (1, 0))):
([(-1, 2), (1, 1)], [(-5, 0)]),
(((1, 2), (8, 1), (-54, 0)), ((1, 1), (11, 0))):
([(1, 1), (-3, 0)], [(-21, 0)]),
(((6, 0),), ((2, 0),)): ([(3, 0)], []),
(((4, 2), (-2, 1), (2, 0)), ((2, 0),)):
([(2, 2), (-1, 1), (1, 0)], []),
((), ()): ([], []),
}
def test_divmod_value_error(self):
self.assertRaises(ValueError, divmod,
Poly([(1, 2), (-3, 1)]), Poly([(3, 3), (4, 0)]))
@pass_to(Poly.__div__, star=True)
def test_div(self):
return {
(((3, 3), (-2, 2), (4, 1), (-3, 0)), ((1, 2), (3, 1), (3, 0))):
[(3, 1), (-11, 0)],
(((1, 3), (-2, 2), (1, 1), (-5, 0)), ((-1, 1), (1, 0))):
[(-1, 2), (1, 1)],
(((1, 2), (8, 1), (-54, 0)), ((1, 1), (11, 0))):
[(1, 1), (-3, 0)],
(((6, 0),), ((2, 0),)): [(3, 0)],
(((4, 2), (-2, 1), (2, 0)), ((2, 0),)):
[(2, 2), (-1, 1), (1, 0)],
((), ()): [],
}
@pass_to(Poly.__mod__, star=True)
def test_mod(self):
return {
(((3, 3), (-2, 2), (4, 1), (-3, 0)), ((1, 2), (3, 1), (3, 0))):
[(28, 1), (30, 0)],
(((1, 3), (-2, 2), (1, 1), (-5, 0)), ((-1, 1), (1, 0))):
[(-5, 0)],
(((1, 2), (8, 1), (-54, 0)), ((1, 1), (11, 0))):
[(-21, 0)],
(((6, 0),), ((2, 0),)): [],
(((4, 2), (-2, 1), (2, 0)), ((2, 0),)):
[],
((), ()): [],
}
def test_pow(self):
self.assertRaises(TypeError, lambda: Poly([(2, 2), (-1, 1)]) ** -1)
self.assertRaises(TypeError, lambda: Poly([]) ** 0)
self.assertEqual(Poly([(1, 3), (2, 1)]) ** 1, Poly([(1, 3), (2, 1)]))
self.assertEqual(Poly([(1, 1), (-1, 0)]) ** 2,
Poly([(1, 2), (-2, 1), (1, 0)]))
self.assertEqual(Poly([(1, 3), (-1, 2)]) ** 0, Poly([(1, 0)]))
self.assertEqual(Poly([(1, 1)]) ** 3, Poly([(1, 3)]))
self.assertEqual(Poly([(1, 4)]) ** 3, Poly([(1, 12)]))
if __name__ == '__main__':
unittest.main()
|
mit
| 8,370,560,494,488,886,000
| 32.361404
| 78
| 0.30101
| false
| 2.837362
| true
| false
| false
|
sandialabs/BioCompoundML
|
bcml/PubChemUtils/pubchempy_utils.py
|
1
|
14579
|
from __future__ import print_function
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from collections import Callable, defaultdict
from six.moves import xrange
try:
# For Python 3.0 and later
import urllib.request as urllib2
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from bs4 import BeautifulSoup
from time import sleep
_base = 'pubchem.ncbi.nlm.nih.gov'
_pug_rest = '"http://pubchem.ncbi.nlm.nih.gov/pug_rest"'
_dir = os.path.dirname(__file__)
_fp_file = os.path.abspath(os.path.join(_dir, 'fingerprints.txt'))
'''
This module extends the common functionality of the PubChemPy
package
'''
class CompoundDict(OrderedDict):
'''
    The compound dictionary is ordered and contains various levels of
dictionaries underneath, this is the reason for the complicated structure
'''
def __init__(self, default_factory=defaultdict, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('First argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(tuple(self.items())))
def __repr__(self):
return 'CompoundDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
def verbose_print(verbose, line):
if verbose:
print(line)
def _url_factory(uri):
'''
Handle the pubchem RESTful interface by passing a url directly
'''
uri = 'http://' + _base + uri
response = urllib2.urlopen(uri)
value = response.read().strip()
return value
def convert_cactvs(cactvs):
'''
This internal function converts 2D fingerprints to a string of 0/1s
The fingerprint is defined here:
ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
The way that this function works is:
1) Pass cactvs
2) Strip the 2 trailing bytes
3) Strip the 2 leading bytes
4) Convert the letters to base64 binary (6-bits)
5) Report bits 32 through (881+32-11), which are the 881 informative
bits.
'''
b64 = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6,
"H": 7, "I": 8, "J": 9, "K": 10, "L": 11, "M": 12, "N": 13,
"O": 14, "P": 15, "Q": 16, "R": 17, "S": 18, "T": 19,
"U": 20, "V": 21, "W": 22, "X": 23, "Y": 24, "Z": 25,
"a": 26, "b": 27, "c": 28, "d": 29, "e": 30, "f": 31,
"g": 32, "h": 33, "i": 34, "j": 35, "k": 36, "l": 37,
"m": 38, "n": 39, "o": 40, "p": 41, "q": 42, "r": 43,
"s": 44, "t": 45, "u": 46, "v": 47, "w": 48, "x": 49,
"y": 50, "z": 51, "0": 52, "1": 53, "2": 54, "3": 55,
"4": 56, "5": 57, "6": 58, "7": 59, "8": 60, "9": 61,
"+": 62, "/": 63}
c = cactvs[:-2].strip()
binstring = (''.join([str(bin(b64[x]))[2:].zfill(6) for x in c.decode('utf-8')]))
return binstring[32:-11]
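# For example, leading data characters decode one at a time:
#   b64['A'] == 0  ->  '000000'
#   b64['D'] == 3  ->  '000011'
# so every base64 character contributes six bits, and the [32:-11] slice drops
# the length prefix and trailing padding, keeping the 881 informative bits
# described in the docstring above.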
def _parse_fingerprint():
'''
Read the NCBI fingerprint spec file and assign features to
each fingerprint
'''
fp_features = {}
with open(_fp_file) as fp:
for line in fp:
(pos, feature) = line.strip().split('\t')
fp_features[int(pos)] = feature
return fp_features
def get_binhash(cactvs):
'''
Convert CACTVS into a dictionary of fingerprint
features
'''
fingerprint = _parse_fingerprint()
binstring = convert_cactvs(cactvs)
binhash = {}
for count, val in enumerate(binstring):
binhash[fingerprint[count]] = val
return binhash
def cactvs_uri(ids):
'''
    This function retrieves the CACTVS uri from PubChem, which is a base64
encoded string, specifying the 881 bits, corresponding to the
fingerprint
'''
_id = str(ids)
uri = '/rest/pug/compound/cid/' + _id + '/property/Fingerprint2D/TXT'
return uri
def smiles_uri(ids):
_id = str(ids)
uri = '/rest/pug/compound/smiles/' + _id + '/cids/TXT'
return uri
def get_smiles(_id):
'''
    This function retrieves the CID for a SMILES from PubChem
'''
uri = smiles_uri(_id)
cid = _url_factory(uri)
return cid
def stream_sdf(ids):
'''
This function allows bulk streaming of SDF into a data structure
'''
concatenated_ids = ','.join(ids)
uri = sdf_uri(concatenated_ids)
sdf_stream = _url_factory(uri).decode().strip('$$$$')
sdfs = ["".join((data.lstrip(), '$$$$')) for data in
            sdf_stream.split('$$$$') if data != ""]
return sdfs
def sdf_uri(ids):
'''
    This function retrieves the SDF URI from PubChem
'''
_id = str(ids)
uri = '/rest/pug/compound/cid/' + _id + '/record/SDF'
return uri
def stream_xml(_id):
'''
This function allows streaming of pubchem XML into a data structure
'''
uri = xml_uri(_id)
xml = _url_factory(uri)
return xml
def xml_uri(_id):
'''
    This function retrieves the XML URI from PubChem
'''
_id = str(_id)
uri = '/rest/pug_view/data/compound/' + _id + '/XML/'
return uri
def extract_pubchem_xml_features(xml):
'''
Extracts primary PubChem Chemical and Physical data.
If multiple values are reported
for a given descriptor, the first is given, since, by
convention, these are the highest quality.
'''
xml_glob = BeautifulSoup(xml, "lxml")
values = {}
def _return_value_list(text, key):
'''Special function for returning list of values'''
return [y.get_text() for y in text.find_next_siblings(key)]
xml_globs = xml_glob.find_all('section')
properties = ''
match_text = 'Chemical and Physical Properties'
for xml_glob in xml_globs:
try:
if xml_glob.find('tocheading').get_text() == match_text:
properties = xml_glob
except:
pass
try:
for x in properties.find_all('name'):
value = None
name = x.get_text()
if name not in values:
if x.find_next_sibling('numvalue'):
value = x.find_next_sibling('numvalue').get_text()
if x.find_next_sibling('stringvalue'):
value = x.find_next_sibling('stringvalue').get_text()
if x.find_next_siblings('stringvaluelist'):
value = _return_value_list(x, 'stringvaluelist')
if x.find_next_siblings('numvaluelist'):
value = _return_value_list(x, 'stringvaluelist')
if value:
values[name] = value
except:
pass
return values
class Collect(object):
"""Initialize variables for Collect class"""
def __init__(self, compounds, fingerprint=False,
xml=False, sdf=False, proxy=False, user=False,
id_name='PubChem', chunks=False, try_count=3, verbose=False,
predictors=False, weights=False, smiles=False):
self.id_name = id_name
self.compounds = compounds
self.pubchem_ids = [x[id_name] for x in compounds]
self.compound = CompoundDict()
self.proxy = proxy
self.chunks = chunks
self.verbose = verbose
self.smiles = smiles
if proxy:
self.set_proxy()
if smiles is not False:
id_list = []
for count, _id in enumerate(self.pubchem_ids):
cid = get_smiles(_id)
id_list.append(cid)
self.compounds[count][id_name] = cid
self.pubchem_ids = id_list
if predictors is not False:
for count, _id in enumerate(self.pubchem_ids):
self.compound[_id]['predictor'] = predictors[count]
if weights is not False:
for count, _id in enumerate(self.pubchem_ids):
self.compound[_id]['weight'] = weights[count]
self.user = user
if user:
self.add_user()
self.verbose = verbose
self.fingerprint = fingerprint
if fingerprint:
self.add_fingerprint(fingerprint=True)
self.sdf = sdf
if sdf:
self.add_sdf()
self.xml = xml
if xml:
self.add_xml()
def set_proxy(self, proxy=False):
"""This function sets the proxy for the urllib2 library"""
if self.proxy:
proxy = self.proxy
if proxy is not False:
verbose_print(self.verbose, "Initializing proxy")
result = urlparse(proxy)
assert result.scheme, "Proxy must be a web address"
proxy_support = urllib2.ProxyHandler({
'http': proxy,
'https': proxy
})
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
def add_user(self, user=False):
"""This function allows user features to be passed
through the Collect Class"""
if self.user:
user = self.user
if user is True:
verbose_print(self.verbose, "Adding user provided features")
for count, _id in enumerate(self.pubchem_ids):
self.compound[_id]['userhash'] = self.compounds[count]['userhash']
def add_fingerprint(self, fingerprint=False, chunks=False):
"""This function collects fingerprint data from NCBI, currently
PubChemPy collects only ASN.1 data, which is difficult to parse
into a binary hash of fingerprint values. It also doesn't allows
bulk collection of the fingerprints. This function allows these"""
if self.fingerprint:
fingerprint = self.fingerprint
if self.chunks:
chunks = self.chunks
if fingerprint is True:
ids = self.pubchem_ids
verbose_print(self.verbose, "Getting fingerprints from NCBI")
fps = []
percent = 0.
length = float(len(self.pubchem_ids))
if length > 100 and chunks is False:
                chunks = 100
if chunks is not False:
for chunk_id in [ids[i:i + chunks] for i in xrange(0, len(ids), chunks)]:
                    '''This loop splits the ids into batches of `chunks` ids each. This is
                    important for really long lists, which may create problems when trying
                    to query huge numbers of ids at once.'''
percent = percent + float(chunks) / length
#print_string = '{:2.1%} out of {}'.format(percent, length)
#verbose_print(self.verbose, print_string)
concatenated_ids = ','.join(chunk_id)
uri = cactvs_uri(concatenated_ids)
fps.extend(_url_factory(uri).splitlines())
else:
concatenated_ids = ','.join(ids)
verbose_print(self.verbose, 'Collecting all fingerprints')
uri = cactvs_uri(concatenated_ids)
fps = _url_factory(uri).splitlines()
for i, cactvs in enumerate(fps):
self.compound[ids[i]]['binhash'] = get_binhash(cactvs)
def add_sdf(self, sdf=False, chunks=False):
"""This function collects NCBI sdfs and stores them for use
in cheminformatic tools"""
if self.sdf:
sdf = self.sdf
if self.chunks:
chunks = self.chunks
if sdf is True:
percent = 0.
length = float(len(self.pubchem_ids))
ids = self.pubchem_ids
if length > 100 and chunks is False:
chunks = 100
if chunks is not False:
for chunk_id in [ids[i:i + chunks] for i in xrange(0, len(ids), chunks)]:
                    '''This loop splits the ids into batches of `chunks` ids each. This is
                    important for really long lists, which may create problems when trying
                    to query huge numbers of ids at once.'''
percent = percent + chunks / length
#print_string = '{:2.1%} out of {}'.format(percent, length)
#verbose_print(self.verbose, print_string)
concatenated_ids = chunk_id
sdfs = stream_sdf(concatenated_ids)
for i, sdf in enumerate(sdfs):
self.compound[chunk_id[i]]['sdf'] = sdf
else:
sdfs = stream_sdf(ids)
for i, sdf in enumerate(sdfs):
self.compound[ids[i]]['sdf'] = sdf
def add_xml(self, xml=False, try_count=3):
"""This function collects NCBI XML and stores them for later parsing"""
if self.xml:
xml = self.xml
if xml is True:
percent = 0.
length = float(len(self.pubchem_ids))
ids = self.pubchem_ids
verbose_print(self.verbose, 'Collecting all XMLs')
for count, _id in enumerate(ids):
percent = float(count) / float(length)
#print_string = '{:2.1%} out of {}'.format(percent, length)
#verbose_print(self.verbose, print_string)
val = False
count = 0
while (val is False) and (count < try_count):
try:
xml_stream = stream_xml(_id)
self.compound[_id]['xml'] = extract_pubchem_xml_features(xml_stream)
val = True
except:
sleep(5)
count = count + 1
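# Minimal usage sketch (requires network access to pubchem.ncbi.nlm.nih.gov;
# CID '702' -- ethanol -- is used purely for illustration):
#
#   compounds = [{'PubChem': '702'}]
#   collector = Collect(compounds, fingerprint=True, verbose=True)
#   collector.compound['702']['binhash']   # {fingerprint feature: '0'/'1', ...}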
|
bsd-3-clause
| 4,310,988,585,706,478,600
| 34.13012
| 92
| 0.554016
| false
| 3.836579
| false
| false
| false
|
averaart/fq_delta
|
fq_delta/fq_delta.py
|
1
|
10373
|
__author__ = 'averaart'
"""This module offers a means to store multiple versions of the same fastq file, by only storing the differences between
them and recreating the processed file based on the original file and the differences."""
# Batteries included
import os
import sys
from subprocess import Popen, PIPE
import hashlib
import zipfile
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except ImportError:
compression = zipfile.ZIP_STORED
# 3rd party imports
import diff_match_patch as dmp_module
class InputError(Exception):
pass
class ChecksumError(Exception):
pass
# global variables
dmp = dmp_module.diff_match_patch()
dmp.Diff_Timeout = 0.0005 # default is 1
dmp.Match_Distance = 1000 # default is 1000
dmp.Match_MaxBits = 0 # default is 32, 0 is advised for python
def _open(name):
"""Opens a file, or streams an unquiping archive."""
if name[-3:] == '.qp':
return Popen('unquip -c ' + name, shell=True, stdout=PIPE).stdout
else:
try:
return open(name, 'r')
except IOError:
print "Couldn't find the file..."
def create_delta(original_file=sys.stdin, processed_file=sys.stdin, delta_filename='', output_processed=False):
"""This function creates a delta file based on an original file and a processed file. Either files could come from
standard in."""
if isinstance(processed_file, str):
processed_file = _open(processed_file)
if delta_filename == '':
delta_filename = processed_file.name
delta_file = DeltaFile('w', delta_filename, original_file)
for line in processed_file:
delta_file.write(line)
if output_processed:
print line,
delta_file.close()
def rebuild_fastq(delta_filename, original_file=sys.stdin, out=sys.stdout, to_stdout=False):
"""Recreates the processed file from the original and delta files."""
# Convert file names to files, and open quip-files while we're at it.
if isinstance(original_file, str):
original_file = _open(original_file)
processed_file = DeltaFile('r', delta_filename, original_file)
if isinstance(out, str):
out = open(out, 'w')
if out == sys.stdout:
to_stdout = False
for line in processed_file:
out.write(line + '\n')
if to_stdout:
sys.stdout.write(line + '\n')
class DeltaFile():
def __init__(self, mode, delta_filename, original_file=sys.stdin, processed_file=sys.stdin, reuse=False):
self.leftover = list()
self.mode = mode
self.reuse = reuse
# Open an existing deltafile to read the processed file
if self.mode == 'r':
self.delta_filename = delta_filename
self.buffer = list()
# Convert file names to files, and open quip-files while we're at it.
if isinstance(original_file, str):
self.original_file = _open(original_file)
else:
self.original_file = original_file
self.md5 = hashlib.md5()
# Extract the checksum and delta files.
# If there is no checksum file in the zipfile, bail out.
# ("I'm not touching that with a 10 foot pole!")
zf = zipfile.ZipFile(delta_filename)
namelist = zf.namelist()
if 'md5_checksum' not in namelist:
raise ChecksumError('No checksum found.')
else:
namelist.pop(namelist.index("md5_checksum"))
self.checksum = zf.open('md5_checksum', "r").read()
# For the delta file, first assume the filename is the same as the archive's name
# minus ".zip". If that fails, find the first file that contains the word "delta".
# Else just extract the first file you can find. Ugly, I know... :D
self.filename = self.delta_filename.rpartition('.')[0]
try:
zf.extract(self.filename)
except KeyError:
delta_names = [s for s in namelist if "delta" in s]
if len(delta_names) > 0:
self.filename = delta_names[0]
else:
self.filename = namelist[0]
zf.extract(self.filename)
self.deltas = open(self.filename, "r")
# Write a new deltafile from the processed data.
elif self.mode == 'w':
# Raise an Exception if both "files" turn out to be sys.stdin.
if original_file == sys.stdin and processed_file == sys.stdin:
raise InputError("Only one of the inputfiles can be STDIN.")
# Convert file names to files, and open quip-files while we're at it.
if isinstance(original_file, str):
self.original_file = _open(original_file)
else:
self.original_file = original_file
if isinstance(processed_file, str):
self.processed_file = _open(processed_file)
else:
self.processed_file = processed_file
self.md5 = hashlib.md5()
if delta_filename == '':
self.delta_filename = processed_file.name
else:
self.delta_filename = delta_filename
# Remove .zip if entered as delta_filename argument.
# It'll be added when the file is zipped.
if self.delta_filename[-4:] == '.zip':
self.delta_filename = self.delta_filename[:-4]
self.delta_file = open(self.delta_filename, 'a')
else:
raise Exception('Illegal mode: ' + str(mode))
def __iter__(self):
return self
def reset(self):
self.deltas.seek(0)
self.original_file.seek(0)
self.leftover = list()
self.md5 = hashlib.md5()
def next(self):
self.check_reading()
if self.original_file.closed or self.deltas.closed:
raise IOError("Trying to iterate over closed files...")
while len(self.buffer) <= 0:
while len(self.buffer) < 4:
delta = ''
t1 = ''
t2 = ''
t1 = self.original_file.readline()
delta = self.deltas.readline().strip()
if delta == '':
# End of File
# Check the checksum...
if not self.md5.digest() == self.checksum:
self.close()
raise ChecksumError("Checksum did not match!")
if self.reuse:
self.reset()
else:
# Clean up the uncompressed delta file
self.deltas.close()
os.remove(self.filename)
# Kill the iterator
raise StopIteration
diff = dmp.diff_fromDelta(t1.strip(), delta.strip())
t2 = dmp.diff_text2(diff)
self.buffer.append(t2)
# Check if the read was removed. If so, clear the buffer so the next four lines are read.
if self.buffer == ['', '', '', '']:
self.buffer = list()
nextline = self.buffer.pop(0)
self.md5.update(nextline)
return nextline
def readline(self):
self.check_reading()
return self.next()
def readlines(self):
self.check_reading()
return [line for line in self]
def writelines(self, lines, output_processed=False, close_file=False):
lines = self.leftover + lines
while len(lines) >= 4:
id1 = self.original_file.readline().strip()
id2 = lines.pop(0).strip()
seq1 = self.original_file.readline().strip()
seq2 = lines.pop(0).strip()
com1 = self.original_file.readline().strip()
com2 = lines.pop(0).strip()
qua1 = self.original_file.readline().strip()
qua2 = lines.pop(0).strip()
if id2 == '':
break
self.md5.update(id2)
self.md5.update(seq2)
self.md5.update(com2)
self.md5.update(qua2)
while id1.partition('\t')[0] != id2.partition('\t')[0]:
self.delta_file.write('-' + str(len(id1.strip())) + '\n')
self.delta_file.write('-' + str(len(seq1.strip())) + '\n')
self.delta_file.write('-' + str(len(com1.strip())) + '\n')
self.delta_file.write('-' + str(len(qua1.strip())) + '\n')
id1 = self.original_file.readline().strip()
seq1 = self.original_file.readline().strip()
com1 = self.original_file.readline().strip()
qua1 = self.original_file.readline().strip()
if id1 == '':
break
for (t1, t2) in ((id1, id2), (seq1, seq2), (com1, com2), (qua1, qua2)):
diff = dmp.diff_main(t1.strip(), t2.strip())
delta = dmp.diff_toDelta(diff) + '\n'
self.delta_file.write(delta)
if output_processed:
print t2
self.leftover = lines
if close_file:
self.close()
def write(self, string, output_processed=False, close_file=False):
lines = string.strip().split('\n')
self.writelines(lines, output_processed, close_file)
def close(self):
        if self.mode == 'r':
if not self.deltas.closed:
self.deltas.close()
try:
os.remove(self.filename)
except OSError:
pass
else:
self.delta_file.close()
# Copy the delta file to a compressed archive, and remove the delta file
self.zf = zipfile.ZipFile(self.delta_filename + '.zip', mode='w')
try:
self.zf.write(self.delta_filename, self.delta_filename.rpartition('/')[2], compress_type=compression)
self.zf.writestr('md5_checksum', self.md5.digest(), compress_type=compression)
os.remove(self.delta_filename)
finally:
self.zf.close()
def check_reading(self):
        if self.mode != 'r':
raise IOError('File not open for reading')
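# Minimal usage sketch (file names are hypothetical):
#
#   create_delta('reads.fastq', 'reads.trimmed.fastq', 'reads.trimmed.delta')
#   rebuild_fastq('reads.trimmed.delta.zip', 'reads.fastq', out='reads.rebuilt.fastq')
#
# The first call stores only the per-line diffs (plus an md5 checksum) in a zip
# archive; the second replays them against the original fastq to recreate the
# processed file.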
|
bsd-3-clause
| 63,940,633,616,006,200
| 33.347682
| 120
| 0.550757
| false
| 4.129379
| false
| false
| false
|
ChinaQuants/blaze
|
blaze/expr/core.py
|
1
|
10471
|
from __future__ import absolute_import, division, print_function
import numbers
import toolz
import inspect
from toolz import unique, concat, compose, partial
from pprint import pprint
from ..compatibility import StringIO, _strtypes, builtins
from ..dispatch import dispatch
__all__ = ['Node', 'path', 'common_subexpression', 'eval_str']
base = (numbers.Number,) + _strtypes
def isidentical(a, b):
""" Strict equality testing
Different from x == y -> Eq(x, y)
>>> isidentical(1, 1)
True
>>> from blaze.expr import symbol
>>> x = symbol('x', 'int')
>>> isidentical(x, 1)
False
>>> isidentical(x + 1, x + 1)
True
>>> isidentical(x + 1, x + 2)
False
>>> isidentical((x, x + 1), (x, x + 1))
True
>>> isidentical((x, x + 1), (x, x + 2))
False
"""
if isinstance(a, base) and isinstance(b, base):
return a == b
if type(a) != type(b):
return False
if isinstance(a, Node):
return all(map(isidentical, a._args, b._args))
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
return len(a) == len(b) and all(map(isidentical, a, b))
return a == b
class Node(object):
""" Node in a tree
This serves as the base class for ``Expr``. This class holds all of the
tree traversal functions that are independent of tabular or array
computation. This is everything that we can do independent of the problem
domain. Note that datashape is not imported.
See Also
--------
blaze.expr.expressions.Expr
"""
__slots__ = ()
__inputs__ = '_child',
def __init__(self, *args, **kwargs):
assert frozenset(kwargs).issubset(self.__slots__)
for slot, arg in zip(self.__slots__[1:], args):
setattr(self, slot, arg)
for key, value in kwargs.items():
setattr(self, key, value)
@property
def _args(self):
return tuple(getattr(self, slot) for slot in self.__slots__[1:])
@property
def _inputs(self):
return tuple(getattr(self, i) for i in self.__inputs__)
def _leaves(self):
""" Leaves of an expression tree
All nodes without inputs. Leaves are returned in order, left to right.
>>> from blaze.expr import symbol, join, by
>>> t = symbol('t', 'var * {id: int32, name: string}')
>>> t._leaves()
[t]
>>> by(t.name, count=t.id.nunique())._leaves()
[t]
>>> v = symbol('v', 'var * {id: int32, city: string}')
>>> join(t, v)._leaves()
[t, v]
"""
if not self._inputs:
return [self]
else:
return list(unique(concat(i._leaves() for i in self._inputs if
isinstance(i, Node))))
isidentical = isidentical
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash((type(self), self._args))
return self._hash
def __str__(self):
rep = ["%s=%s" % (slot, _str(arg))
for slot, arg in zip(self.__slots__[1:], self._args)]
return "%s(%s)" % (type(self).__name__, ', '.join(rep))
def __repr__(self):
return str(self)
def _traverse(self):
""" Traverse over tree, yielding all subtrees and leaves """
yield self
traversals = (arg._traverse() if isinstance(arg, Node) else [arg]
for arg in self._args)
for trav in traversals:
for item in trav:
yield item
def _subs(self, d):
""" Substitute terms in the tree
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount + 3
>>> expr._subs({3: 4, 'amount': 'id'}).isidentical(t.id + 4)
True
"""
return subs(self, d)
def _resources(self):
return toolz.merge([arg._resources() for arg in self._args
if isinstance(arg, Node)])
def _subterms(self):
return subterms(self)
def __contains__(self, other):
return other in set(self._subterms())
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.__init__(*state)
def __eq__(self, other):
ident = self.isidentical(other)
if ident is True:
return ident
try:
return self._eq(other)
except AttributeError:
# e.g., we can't compare whole tables to other things (yet?)
pass
return False
def __ne__(self, other):
return self._ne(other)
def __lt__(self, other):
return self._lt(other)
def __le__(self, other):
return self._le(other)
def __gt__(self, other):
return self._gt(other)
def __ge__(self, other):
return self._ge(other)
def __add__(self, other):
return self._add(other)
def __radd__(self, other):
return self._radd(other)
def __mul__(self, other):
return self._mul(other)
def __rmul__(self, other):
return self._rmul(other)
def __div__(self, other):
return self._div(other)
def __rdiv__(self, other):
return self._rdiv(other)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self, other):
return self._floordiv(other)
def __rfloordiv__(self, other):
return self._rfloordiv(other)
def __sub__(self, other):
return self._sub(other)
def __rsub__(self, other):
return self._rsub(other)
def __pow__(self, other):
return self._pow(other)
def __rpow__(self, other):
return self._rpow(other)
def __mod__(self, other):
return self._mod(other)
def __rmod__(self, other):
return self._rmod(other)
def __or__(self, other):
return self._or(other)
def __ror__(self, other):
return self._ror(other)
def __and__(self, other):
return self._and(other)
def __rand__(self, other):
return self._rand(other)
def __neg__(self):
return self._neg()
def __invert__(self):
return self._invert()
def __abs__(self):
from .math import abs
return abs(self)
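# Sketch of how __slots__ drives construction (hypothetical subclass; the real
# expression types live in blaze.expr.expressions):
#
#   class Literal(Node):
#       __slots__ = '_hash', 'value'    # first slot is reserved for the hash cache
#       __inputs__ = ()                 # a literal has no child expressions
#
#   lit = Literal(5)
#   lit._args      # -> (5,)   taken from __slots__[1:]
#   lit._inputs    # -> ()
#   lit._leaves()  # -> [lit]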
def get_callable_name(o):
"""Welcome to str inception. Leave your kittens at home.
"""
# special case partial objects
if isinstance(o, partial):
return 'partial(%s, %s)' % (get_callable_name(o.func),
', '.join(map(str, o.args)))
try:
# python 3 makes builtins look nice
return o.__qualname__
except AttributeError:
try:
# show the module of the object, if we can
return '%s.%s' % (inspect.getmodule(o).__name__, o.__name__)
except AttributeError:
try:
# __self__ tells us the class the method is bound to
return '%s.%s' % (o.__self__.__name__, o.__name__)
except AttributeError:
# exhausted all avenues of printing callables so just print the
# name of the object
return o.__name__
def _str(s):
""" Wrap single quotes around strings """
if isinstance(s, str):
return "'%s'" % s
elif callable(s):
return get_callable_name(s)
elif isinstance(s, Node):
return str(s)
elif isinstance(s, (list, tuple)):
body = ", ".join(_str(x) for x in s)
return "({0})".format(body if len(s) > 1 else (body + ","))
else:
stream = StringIO()
pprint(s, stream=stream)
return stream.getvalue().rstrip()
@dispatch(Node)
def subterms(expr):
return concat([[expr], concat(map(subterms, expr._inputs))])
@dispatch(object)
def subterms(x):
yield x
def subs(o, d):
""" Substitute values within data structure
>>> subs(1, {1: 2})
2
>>> subs([1, 2, 3], {2: 'Hello'})
[1, 'Hello', 3]
"""
d = dict((k, v) for k, v in d.items() if k is not v)
if not d:
return o
try:
if o in d:
d = d.copy()
o = d.pop(o)
except TypeError:
pass
return _subs(o, d)
@dispatch((tuple, list), dict)
def _subs(o, d):
return type(o)([subs(arg, d) for arg in o])
@dispatch(Node, dict)
def _subs(o, d):
"""
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> subs(t, {'balance': 'amount'}).fields
['name', 'amount']
"""
newargs = [subs(arg, d) for arg in o._args]
return type(o)(*newargs)
@dispatch(object, dict)
def _subs(o, d):
""" Private dispatched version of ``subs``
>>> subs('Hello', {})
'Hello'
"""
return o
def path(a, b):
""" A path of nodes from a to b
>>> from blaze.expr import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = t.amount.sum()
>>> list(path(expr, t))
[sum(t.amount), t.amount, t]
"""
while not a.isidentical(b):
yield a
if not a._inputs:
break
for child in a._inputs:
if any(b.isidentical(node) for node in child._traverse()):
a = child
break
yield a
def common_subexpression(*exprs):
""" Common sub expression between subexpressions
Examples
--------
>>> from blaze.expr import symbol, common_subexpression
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> common_subexpression(t.x, t.y)
t
"""
sets = [set(subterms(t)) for t in exprs]
return builtins.max(set.intersection(*sets),
key=compose(len, str))
def eval_str(expr):
""" String suitable for evaluation
>>> from blaze.expr import symbol, eval_str
>>> x = symbol('x', 'real')
>>> eval_str(2*x + 1)
'(2 * x) + 1'
>>> from datetime import date
>>> eval_str(date(2000, 1, 20))
'datetime.date(2000, 1, 20)'
"""
from datetime import date, datetime
if isinstance(expr, (date, datetime)):
return repr(expr)
return repr(expr) if isinstance(expr, _strtypes) else str(expr)
def parenthesize(s):
"""
>>> parenthesize('1')
'1'
>>> parenthesize('1 + 2')
'(1 + 2)'
"""
if ' ' in s:
return '(%s)' % s
else:
return s
|
bsd-3-clause
| 5,760,293,578,376,260,000
| 23.464953
| 79
| 0.530895
| false
| 3.665033
| false
| false
| false
|
rs2/pandas
|
pandas/conftest.py
|
1
|
33159
|
"""
This file is very long and growing, but it was decided to not split it yet, as
it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989
Instead of splitting it was decided to define sections here:
- Configuration / Settings
- Autouse fixtures
- Common arguments
- Missing values & co.
- Classes
- Indices
- Series'
- DataFrames
- Operators & Operations
- Data sets/files
- Time zones
- Dtypes
- Misc
"""
from collections import abc
from datetime import date, time, timedelta, timezone
from decimal import Decimal
import operator
import os
from dateutil.tz import tzlocal, tzutc
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pytz import FixedOffset, utc
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.core import ops
from pandas.core.indexes.api import Index, MultiIndex
# ----------------------------------------------------------------
# Configuration / Settings
# ----------------------------------------------------------------
# pytest
def pytest_configure(config):
# Register marks to avoid warnings in pandas.test()
# sync with setup.cfg
config.addinivalue_line("markers", "single: mark a test as single cpu only")
config.addinivalue_line("markers", "slow: mark a test as slow")
config.addinivalue_line("markers", "network: mark a test as network")
config.addinivalue_line(
"markers", "db: tests requiring a database (mysql or postgres)"
)
config.addinivalue_line("markers", "high_memory: mark a test as a high-memory only")
config.addinivalue_line("markers", "clipboard: mark a pd.read_clipboard test")
config.addinivalue_line(
"markers", "arm_slow: mark a test as slow for arm64 architecture"
)
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true", help="skip slow tests")
parser.addoption("--skip-network", action="store_true", help="skip network tests")
parser.addoption("--skip-db", action="store_true", help="skip db tests")
parser.addoption(
"--run-high-memory", action="store_true", help="run high memory tests"
)
parser.addoption("--only-slow", action="store_true", help="run only slow tests")
parser.addoption(
"--strict-data-files",
action="store_true",
help="Fail if a test is skipped for missing data file.",
)
def pytest_runtest_setup(item):
if "slow" in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if "slow" not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if "network" in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
if "db" in item.keywords and item.config.getoption("--skip-db"):
pytest.skip("skipping due to --skip-db")
if "high_memory" in item.keywords and not item.config.getoption(
"--run-high-memory"
):
pytest.skip("skipping high memory test since --run-high-memory was not set")
# Hypothesis
hypothesis.settings.register_profile(
"ci",
# Hypothesis timing checks are tuned for scalars by default, so we bump
# them from 200ms to 500ms per test case as the global default. If this
# is too short for a specific test, (a) try to make it faster, and (b)
# if it really is slow add `@settings(deadline=...)` with a working value,
# or `deadline=None` to entirely disable timeouts for that test.
deadline=500,
suppress_health_check=(hypothesis.HealthCheck.too_slow,),
)
hypothesis.settings.load_profile("ci")
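# --- Illustrative sketch (editor's addition, not pandas code) ---------------
# The note above says a slow property-based test should override the 500ms
# deadline itself. A minimal, hypothetical example of doing so (the test name
# and strategy are made up; the leading underscore keeps it out of collection):
@hypothesis.settings(deadline=None)  # or e.g. deadline=2000 for a 2s budget
@hypothesis.given(st.integers(min_value=-99, max_value=99))
def _example_deadline_override(n):
    # a real test would exercise slow pandas code here
    assert -99 <= n <= 99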
# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
)
for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-5, 5),
normalize=st.booleans(),
month=st.integers(min_value=1, max_value=12),
),
)
for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-24, 24),
normalize=st.booleans(),
startingMonth=st.integers(min_value=1, max_value=12),
),
)
# ----------------------------------------------------------------
# Autouse fixtures
# ----------------------------------------------------------------
@pytest.fixture(autouse=True)
def configure_tests():
"""
Configure settings for all tests and test modules.
"""
pd.set_option("chained_assignment", "raise")
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
"""
Make `np` and `pd` names available for doctests.
"""
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
# ----------------------------------------------------------------
# Common arguments
# ----------------------------------------------------------------
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis {repr(x)}")
def axis(request):
"""
Fixture for returning the axis numbers of a DataFrame.
"""
return request.param
axis_frame = axis
@pytest.fixture(params=[0, "index"], ids=lambda x: f"axis {repr(x)}")
def axis_series(request):
"""
Fixture for returning the axis numbers of a Series.
"""
return request.param
@pytest.fixture(params=[True, False, None])
def observed(request):
"""
Pass in the observed keyword to groupby for [True, False]
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
parameter is not passed).
"""
return request.param
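# Illustrative sketch (editor's addition, not a collected test): how a test
# consuming the ``observed`` fixture above might use it. The data and column
# names are hypothetical.
def _example_observed_usage(observed):
    cat = pd.Categorical(["a", "a"], categories=["a", "b"])
    df = DataFrame({"key": cat, "val": [1, 2]})
    # observed=True keeps only the "a" group; False/None also emits the
    # unobserved "b" category (filled with the aggregation's empty result).
    return df.groupby("key", observed=observed)["val"].sum()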
@pytest.fixture(params=[True, False, None])
def ordered(request):
"""
Boolean 'ordered' parameter for Categorical.
"""
return request.param
@pytest.fixture(params=["first", "last", False])
def keep(request):
"""
Valid values for the 'keep' parameter used in
.duplicated or .drop_duplicates
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
"""
Fixture for trying all interval closed parameters.
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def other_closed(request):
"""
Secondary closed fixture to allow parametrizing over all pairs of closed.
"""
return request.param
@pytest.fixture(params=[None, "gzip", "bz2", "zip", "xz"])
def compression(request):
"""
Fixture for trying common compression types in compression tests.
"""
return request.param
@pytest.fixture(params=["gzip", "bz2", "zip", "xz"])
def compression_only(request):
"""
Fixture for trying common compression types in compression tests excluding
uncompressed case.
"""
return request.param
@pytest.fixture(params=[True, False])
def writable(request):
"""
    Fixture parametrizing whether an array is writable.
"""
return request.param
@pytest.fixture(params=["inner", "outer", "left", "right"])
def join_type(request):
"""
Fixture for trying all types of join operations.
"""
return request.param
@pytest.fixture(params=["nlargest", "nsmallest"])
def nselect_method(request):
"""
Fixture for trying all nselect methods.
"""
return request.param
# ----------------------------------------------------------------
# Missing values & co.
# ----------------------------------------------------------------
@pytest.fixture(params=[None, np.nan, pd.NaT, float("nan"), pd.NA], ids=str)
def nulls_fixture(request):
"""
Fixture for each null type in pandas.
"""
return request.param
nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture
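# Illustrative sketch (editor's addition, not a collected test): requesting
# both aliases in one test function parametrizes over every ordered pair of
# null values, e.g. (np.nan, pd.NaT), (pd.NA, None), ...
def _example_null_pair(nulls_fixture, nulls_fixture2):
    return nulls_fixture, nulls_fixture2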
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
"""
Fixture for each null type in pandas, each null type exactly once.
"""
return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
# ----------------------------------------------------------------
# Classes
# ----------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series], ids=["index", "series"])
def index_or_series(request):
"""
Fixture to parametrize over Index and Series, made necessary by a mypy
bug, giving an error:
List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
See GH#29725
"""
return request.param
# Generate cartesian product of index_or_series fixture:
index_or_series2 = index_or_series
@pytest.fixture
def dict_subclass():
"""
Fixture for a dictionary subclass.
"""
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
return TestSubDict
@pytest.fixture
def non_dict_mapping_subclass():
"""
Fixture for a non-mapping dictionary subclass.
"""
class TestNonDictMapping(abc.Mapping):
def __init__(self, underlying_dict):
self._data = underlying_dict
def __getitem__(self, key):
return self._data.__getitem__(key)
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
return TestNonDictMapping
# ----------------------------------------------------------------
# Indices
# ----------------------------------------------------------------
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
"""
DataFrame with 3 level MultiIndex (year, month, day) covering
first 100 business days from 2000-01-01 with random data
"""
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use Int64Index, to make sure things work
ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
def _create_multiindex():
"""
MultiIndex used to test the general functionality of this object
"""
# See Also: tests.multi.conftest.idx
major_axis = Index(["foo", "bar", "baz", "qux"])
minor_axis = Index(["one", "two"])
major_codes = np.array([0, 0, 1, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index_names = ["first", "second"]
mi = MultiIndex(
levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=index_names,
verify_integrity=False,
)
return mi
def _create_mi_with_dt64tz_level():
"""
MultiIndex with a level that is a tzaware DatetimeIndex.
"""
# GH#8367 round trip with pickle
return MultiIndex.from_product(
[[1, 2], ["a", "b"], pd.date_range("20130101", periods=3, tz="US/Eastern")],
names=["one", "two", "three"],
)
indices_dict = {
"unicode": tm.makeUnicodeIndex(100),
"string": tm.makeStringIndex(100),
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
"timedelta": tm.makeTimedeltaIndex(100),
"int": tm.makeIntIndex(100),
"uint": tm.makeUIntIndex(100),
"range": tm.makeRangeIndex(100),
"float": tm.makeFloatIndex(100),
"bool": tm.makeBoolIndex(10),
"categorical": tm.makeCategoricalIndex(100),
"interval": tm.makeIntervalIndex(100),
"empty": Index([]),
"tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
"mi-with-dt64tz-level": _create_mi_with_dt64tz_level(),
"multi": _create_multiindex(),
"repeats": Index([0, 0, 1, 1, 2, 2]),
}
@pytest.fixture(params=indices_dict.keys())
def index(request):
"""
Fixture for many "simple" kinds of indices.
These indices are unlikely to cover corner cases, e.g.
- no names
- no NaTs/NaNs
- no values near implementation bounds
- ...
"""
# copy to avoid mutation, e.g. setting .name
return indices_dict[request.param].copy()
# Needed to generate cartesian product of indices
index_fixture2 = index
@pytest.fixture(params=indices_dict.keys())
def index_with_missing(request):
"""
Fixture for indices with missing values
"""
if request.param in ["int", "uint", "range", "empty", "repeats"]:
pytest.xfail("missing values not supported")
# GH 35538. Use deep copy to avoid illusive bug on np-dev
# Azure pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
vals = ind.values
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
# For setting missing values in the top level of MultiIndex
vals = ind.tolist()
vals[0] = tuple([None]) + vals[0][1:]
vals[-1] = tuple([None]) + vals[-1][1:]
return MultiIndex.from_tuples(vals)
else:
vals[0] = None
vals[-1] = None
return type(ind)(vals)
# ----------------------------------------------------------------
# Series'
# ----------------------------------------------------------------
@pytest.fixture
def empty_series():
return pd.Series([], index=[], dtype=np.float64)
@pytest.fixture
def string_series():
"""
Fixture for Series of floats with Index of unique strings
"""
s = tm.makeStringSeries()
s.name = "series"
return s
@pytest.fixture
def object_series():
"""
Fixture for Series of dtype object with Index of unique strings
"""
s = tm.makeObjectSeries()
s.name = "objects"
return s
@pytest.fixture
def datetime_series():
"""
Fixture for Series of floats with DatetimeIndex
"""
s = tm.makeTimeSeries()
s.name = "ts"
return s
def _create_series(index):
""" Helper for the _series dict """
size = len(index)
data = np.random.randn(size)
return pd.Series(data, index=index, name="a")
_series = {
f"series-with-{index_id}-index": _create_series(index)
for index_id, index in indices_dict.items()
}
@pytest.fixture
def series_with_simple_index(index):
"""
Fixture for tests on series with changing types of indices.
"""
return _create_series(index)
_narrow_dtypes = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
_narrow_series = {
f"{dtype.__name__}-series": tm.makeFloatSeries(name="a").astype(dtype)
for dtype in _narrow_dtypes
}
@pytest.fixture(params=_narrow_series.keys())
def narrow_series(request):
"""
Fixture for Series with low precision data types
"""
# copy to avoid mutation, e.g. setting .name
return _narrow_series[request.param].copy()
_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}
@pytest.fixture(params=_index_or_series_objs.keys())
def index_or_series_obj(request):
"""
Fixture for tests on indexes, series and series with a narrow dtype
copy to avoid mutation, e.g. setting .name
"""
return _index_or_series_objs[request.param].copy(deep=True)
# ----------------------------------------------------------------
# DataFrames
# ----------------------------------------------------------------
@pytest.fixture
def empty_frame():
return DataFrame()
@pytest.fixture
def int_frame():
"""
Fixture for DataFrame of ints with index of unique strings
Columns are ['A', 'B', 'C', 'D']
A B C D
vpBeWjM651 1 0 1 0
5JyxmrP1En -1 0 0 0
qEDaoD49U2 -1 1 0 0
m66TkTfsFe 0 0 0 0
EHPaNzEUFm -1 0 -1 0
fpRJCevQhi 2 0 0 0
OlQvnmfi3Q 0 0 -2 0
... .. .. .. ..
uB1FPlz4uP 0 0 0 1
EcSe6yNzCU 0 0 -1 0
L50VudaiI8 -1 1 -2 0
y3bpw4nwIp 0 -1 0 0
H0RdLLwrCT 1 1 0 0
rY82K0vMwm 0 0 0 0
1OPIUjnkjk 2 0 0 0
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData()).astype("int64")
@pytest.fixture
def datetime_frame():
"""
Fixture for DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']
A B C D
2000-01-03 -1.122153 0.468535 0.122226 1.693711
2000-01-04 0.189378 0.486100 0.007864 -1.216052
2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
2000-01-06 0.430050 0.894352 0.090719 0.036939
2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
2000-01-10 -0.752633 0.328434 -0.815325 0.699674
2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
... ... ... ... ...
2000-02-03 1.642618 -0.579288 0.046005 1.385249
2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
2000-02-07 -2.656149 -0.601387 1.410148 0.444150
2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
2000-02-09 1.377373 0.398619 1.008453 -0.928207
2000-02-10 0.473194 -0.636677 0.984058 0.511519
2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
[30 rows x 4 columns]
"""
return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def float_frame():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465
qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901
tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433
wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651
M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938
QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053
r78Jwns6dn -0.653707 0.883127 0.682199 0.206159
... ... ... ... ...
IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316
lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999
qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121
yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962
65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987
eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871
xSucinXxuV -1.263557 0.252799 -0.552247 0.400426
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData())
# ----------------------------------------------------------------
# Operators & Operations
# ----------------------------------------------------------------
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations.
"""
return request.param
@pytest.fixture(
params=[
operator.add,
ops.radd,
operator.sub,
ops.rsub,
operator.mul,
ops.rmul,
operator.truediv,
ops.rtruediv,
operator.floordiv,
ops.rfloordiv,
operator.mod,
ops.rmod,
operator.pow,
ops.rpow,
]
)
def all_arithmetic_functions(request):
"""
Fixture for operator and roperator arithmetic functions.
Notes
-----
This includes divmod and rdivmod, whereas all_arithmetic_operators
does not.
"""
return request.param
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
"prod",
"std",
"var",
"median",
"kurt",
"skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
_all_reductions = _all_numeric_reductions + _all_boolean_reductions
@pytest.fixture(params=_all_reductions)
def all_reductions(request):
"""
Fixture for all (boolean + numeric) reduction names.
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
def compare_operators_no_eq_ne(request):
"""
Fixture for dunder names for compare operations except == and !=
* >=
* >
* <
* <=
"""
return request.param
@pytest.fixture(
params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
)
def all_logical_operators(request):
"""
Fixture for dunder names for common logical operations
* |
* &
* ^
"""
return request.param
# ----------------------------------------------------------------
# Data sets/files
# ----------------------------------------------------------------
@pytest.fixture
def strict_data_files(pytestconfig):
"""
Returns the configuration for the test setting `--strict-data-files`.
"""
return pytestconfig.getoption("--strict-data-files")
@pytest.fixture
def datapath(strict_data_files):
"""
Get the path to a data file.
Parameters
----------
path : str
Path to the file, relative to ``pandas/tests/``
Returns
-------
path including ``pandas/tests``.
Raises
------
ValueError
If the path doesn't exist and the --strict-data-files option is set.
"""
BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")
def deco(*args):
path = os.path.join(BASE_PATH, *args)
if not os.path.exists(path):
if strict_data_files:
raise ValueError(
f"Could not find file {path} and --strict-data-files is set."
)
else:
pytest.skip(f"Could not find {path}.")
return path
return deco
@pytest.fixture
def iris(datapath):
"""
The iris dataset as a DataFrame.
"""
return pd.read_csv(datapath("io", "data", "csv", "iris.csv"))
# ----------------------------------------------------------------
# Time zones
# ----------------------------------------------------------------
TIMEZONES = [
None,
"UTC",
"US/Eastern",
"Asia/Tokyo",
"dateutil/US/Pacific",
"dateutil/Asia/Singapore",
tzutc(),
tzlocal(),
FixedOffset(300),
FixedOffset(0),
FixedOffset(-300),
timezone.utc,
timezone(timedelta(hours=1)),
timezone(timedelta(hours=-1), name="foo"),
]
TIMEZONE_IDS = [repr(i) for i in TIMEZONES]
@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
def tz_naive_fixture(request):
"""
Fixture for trying timezones including default (None): {0}
"""
return request.param
@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
def tz_aware_fixture(request):
"""
Fixture for trying explicit timezones: {0}
"""
return request.param
# Generate cartesian product of tz_aware_fixture:
tz_aware_fixture2 = tz_aware_fixture
@pytest.fixture(scope="module")
def datetime_tz_utc():
"""
Yields the UTC timezone object from the datetime module.
"""
return timezone.utc
@pytest.fixture(params=["utc", "dateutil/UTC", utc, tzutc(), timezone.utc])
def utc_fixture(request):
"""
Fixture to provide variants of UTC timezone strings and tzinfo objects.
"""
return request.param
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
@pytest.fixture(params=tm.STRING_DTYPES)
def string_dtype(request):
"""
Parametrized fixture for string dtypes.
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture(params=tm.BYTES_DTYPES)
def bytes_dtype(request):
"""
Parametrized fixture for bytes dtypes.
* bytes
* 'bytes'
"""
return request.param
@pytest.fixture(params=tm.OBJECT_DTYPES)
def object_dtype(request):
"""
Parametrized fixture for object dtypes.
* object
* 'object'
"""
return request.param
@pytest.fixture(params=tm.DATETIME64_DTYPES)
def datetime64_dtype(request):
"""
Parametrized fixture for datetime64 dtypes.
* 'datetime64[ns]'
* 'M8[ns]'
"""
return request.param
@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
"""
Parametrized fixture for timedelta64 dtypes.
* 'timedelta64[ns]'
* 'm8[ns]'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_DTYPES)
def float_dtype(request):
"""
Parameterized fixture for float dtypes.
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
* complex
* 'complex64'
* 'complex128'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_INT_DTYPES)
def sint_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
* int
* 'int8'
* 'int16'
* 'int32'
* 'int64'
"""
return request.param
@pytest.fixture(params=tm.UNSIGNED_INT_DTYPES)
def uint_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
* 'uint8'
* 'uint16'
* 'uint32'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_DTYPES)
def any_int_dtype(request):
"""
Parameterized fixture for any integer dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_EA_INT_DTYPES)
def any_nullable_int_dtype(request):
"""
Parameterized fixture for any nullable integer dtype.
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_EA_INT_DTYPES)
def any_signed_nullable_int_dtype(request):
"""
Parameterized fixture for any signed nullable integer dtype.
* 'Int8'
* 'Int16'
* 'Int32'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
* bool
* 'bool'
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
* complex
* 'complex64'
* 'complex128'
* str
* 'str'
* 'U'
* bytes
* 'bytes'
* 'datetime64[ns]'
* 'M8[ns]'
* 'timedelta64[ns]'
* 'm8[ns]'
* object
* 'object'
"""
return request.param
# categoricals are handled separately
_any_skipna_inferred_dtype = [
("string", ["a", np.nan, "c"]),
("string", ["a", pd.NA, "c"]),
("bytes", [b"a", np.nan, b"c"]),
("empty", [np.nan, np.nan, np.nan]),
("empty", []),
("mixed-integer", ["a", np.nan, 2]),
("mixed", ["a", np.nan, 2.0]),
("floating", [1.0, np.nan, 2.0]),
("integer", [1, np.nan, 2]),
("mixed-integer-float", [1, np.nan, 2.0]),
("decimal", [Decimal(1), np.nan, Decimal(2)]),
("boolean", [True, np.nan, False]),
("boolean", [True, pd.NA, False]),
("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
("datetime", [pd.Timestamp("20130101"), np.nan, pd.Timestamp("20180101")]),
("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
# The following two dtypes are commented out due to GH 23554
# ('complex', [1 + 1j, np.nan, 2 + 2j]),
# ('timedelta64', [np.timedelta64(1, 'D'),
# np.nan, np.timedelta64(2, 'D')]),
("timedelta", [timedelta(1), np.nan, timedelta(2)]),
("time", [time(1), np.nan, time(2)]),
("period", [pd.Period(2013), pd.NaT, pd.Period(2018)]),
("interval", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id
@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
def any_skipna_inferred_dtype(request):
"""
Fixture for all inferred dtypes from _libs.lib.infer_dtype
The covered (inferred) types are:
* 'string'
* 'empty'
* 'bytes'
* 'mixed'
* 'mixed-integer'
* 'mixed-integer-float'
* 'floating'
* 'integer'
* 'decimal'
* 'boolean'
* 'datetime64'
* 'datetime'
* 'date'
* 'timedelta'
* 'time'
* 'period'
* 'interval'
Returns
-------
inferred_dtype : str
The string for the inferred dtype from _libs.lib.infer_dtype
values : np.ndarray
An array of object dtype that will be inferred to have
`inferred_dtype`
Examples
--------
>>> import pandas._libs.lib as lib
>>>
>>> def test_something(any_skipna_inferred_dtype):
... inferred_dtype, values = any_skipna_inferred_dtype
... # will pass
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
"""
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
# GH#35711 make sure sqlite history file handle is not leaked
from traitlets.config import Config # noqa: F401 isort:skip
c = Config()
c.HistoryManager.hist_file = ":memory:"
return InteractiveShell(config=c)
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
"""
Yields scipy sparse matrix classes.
"""
from scipy import sparse
return getattr(sparse, request.param + "_matrix")
@pytest.fixture(params=list(tm.cython_table))
def cython_table_items(request):
"""
    Yields a tuple of a function and its corresponding name. These correspond
    to the list of aggregator "Cython functions" used on selected table items.
"""
return request.param
@pytest.fixture(
params=[
getattr(pd.offsets, o)
for o in pd.offsets.__all__
if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
]
)
def tick_classes(request):
"""
Fixture for Tick based datetime offsets available for a time series.
"""
return request.param
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture()
def fsspectest():
pytest.importorskip("fsspec")
from fsspec import register_implementation
from fsspec.implementations.memory import MemoryFileSystem
from fsspec.registry import _registry as registry
class TestMemoryFS(MemoryFileSystem):
protocol = "testmem"
test = [None]
def __init__(self, **kwargs):
self.test[0] = kwargs.pop("test", None)
super().__init__(**kwargs)
register_implementation("testmem", TestMemoryFS, clobber=True)
yield TestMemoryFS()
registry.pop("testmem", None)
TestMemoryFS.test[0] = None
TestMemoryFS.store.clear()
|
bsd-3-clause
| -5,213,168,821,077,794,000
| 24.645012
| 88
| 0.588407
| false
| 3.489319
| true
| false
| false
|
reunition/reunition
|
reunition/apps/alumni/test_models.py
|
1
|
1192
|
from django.test import TestCase
from model_mommy import mommy
class PersonTests(TestCase):
def test_display_name(self):
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
)
self.assertEqual(person.display_name, 'Bobbie Smith')
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
current_first_name='Roberta',
)
self.assertEqual(person.display_name, 'Roberta (Bobbie) Smith')
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
current_last_name='Jones',
)
self.assertEqual(person.display_name, 'Bobbie Jones (Smith)')
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
current_first_name='Roberta',
current_last_name='Jones',
)
self.assertEqual(person.display_name, 'Roberta Jones (Bobbie Smith)')
|
mit
| -1,216,111,322,849,305,900
| 29.564103
| 77
| 0.571309
| false
| 3.601208
| false
| false
| false
|
rulz/django-registration
|
registration/models.py
|
1
|
11801
|
from __future__ import unicode_literals
import datetime
import hashlib
import random
import re
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.template import RequestContext, TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.utils import six
from registration.users import UserModel, UserModelString
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True, request=None):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
Additionally, if email is sent and ``request`` is supplied,
it will be passed to the email template.
"""
new_user = UserModel().objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site, request)
return new_user
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(six.text_type(random.random()).encode('ascii')).hexdigest()[:5]
salt = salt.encode('ascii')
username = user.username
if isinstance(username, six.text_type):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except UserModel().DoesNotExist:
profile.delete()
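# Illustrative sketch (editor's addition, not part of django-registration):
# how a hypothetical view/helper might tie the two manager calls above
# together. ``site`` would normally come from the Django sites framework
# (e.g. ``get_current_site(request)``); nothing here is executed on import.
def _example_registration_flow(username, email, password, site,
                               activation_key=None):
    manager = RegistrationProfile.objects
    if activation_key is None:
        # step 1: create the inactive account and email its activation key
        return manager.create_inactive_user(username, email, password, site)
    # step 2: later, when the user follows the emailed activation link
    return manager.activate_user(activation_key)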
@python_2_unicode_compatible
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = "ALREADY_ACTIVATED"
#user = models.ForeignKey(UserModelString(), unique=True, verbose_name=_('user'))
user = models.OneToOneField(UserModelString(), verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __str__(self):
return "Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return (self.activation_key == self.ACTIVATED or
(self.user.date_joined + expiration_date <= datetime_now()))
activation_key_expired.boolean = True
def send_activation_email(self, site, request=None):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the text body of the email.
``registration/activation_email.html``
This template will be used for the html body of the email.
These templates will each receive the following context
variables:
``user``
The new user account
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
``request``
Optional Django's ``HttpRequest`` object from view.
If supplied will be passed to the template for better
flexibility via ``RequestContext``.
"""
ctx_dict = {}
if request is not None:
ctx_dict = RequestContext(request, ctx_dict)
# update ctx_dict after RequestContext is created
# because template context processors
# can overwrite some of the values like user
# if django.contrib.auth.context_processors.auth is used
ctx_dict.update({
'user': self.user,
'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site,
})
subject = getattr(settings, 'REGISTRATION_EMAIL_SUBJECT_PREFIX', '') + \
render_to_string('registration/activation_email_subject.txt', ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message_txt = render_to_string('registration/activation_email.txt', ctx_dict)
email_message = EmailMultiAlternatives(subject, message_txt, settings.DEFAULT_FROM_EMAIL, [self.user.email])
try:
message_html = render_to_string('registration/activation_email.html', ctx_dict)
except TemplateDoesNotExist:
message_html = None
if message_html:
email_message.attach_alternative(message_html, 'text/html')
email_message.send()
|
bsd-3-clause
| -2,108,707,373,669,058,000
| 37.819079
| 116
| 0.641132
| false
| 4.836475
| false
| false
| false
|
open-synergy/opnsynid-stock-logistics-warehouse
|
stock_production_operation/models/stock_warehouse.py
|
1
|
6557
|
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api, _
from openerp.exceptions import Warning as UserError
class StockWarehouse(models.Model):
_inherit = "stock.warehouse"
production_rm_type_id = fields.Many2one(
string="Raw Material Consumption Type",
comodel_name="stock.picking.type"
)
production_rm_loc_id = fields.Many2one(
string="Raw Material Consumption Location",
comodel_name="stock.location"
)
production_fg_type_id = fields.Many2one(
string="Production Result Type",
comodel_name="stock.picking.type"
)
production_fg_loc_id = fields.Many2one(
string="Production Result Location",
comodel_name="stock.location"
)
@api.multi
def _prepare_production_rm_location(self):
self.ensure_one()
parent_location = self.view_location_id
data = {
"name": _("RM Consumption"),
"location_id": parent_location.id,
"usage": "production",
"active": True
}
return data
@api.multi
def _prepare_production_fg_location(self):
self.ensure_one()
parent_location = self.view_location_id
data = {
"name": _("Production Result"),
"location_id": parent_location.id,
"usage": "production",
"active": True
}
return data
@api.multi
def _prepare_production_rm_sequence(self):
self.ensure_one()
data = {
"name": self.code + " - RM Consumption",
"prefix": self.code + "/RM/",
"padding": 6
}
return data
@api.multi
def _prepare_production_fg_sequence(self):
self.ensure_one()
data = {
"name": self.code + " - Production Result",
"prefix": self.code + "/FG/",
"padding": 6
}
return data
@api.multi
def _prepare_production_rm_type(self):
self.ensure_one()
obj_sequence = self.env['ir.sequence']
src_location = self.lot_stock_id
dest_location = self._get_production_rm_location()
sequence = obj_sequence.create(
self._prepare_production_rm_sequence())
data = {
"name": _("RM Consumption"),
"warehouse_id": self.id,
"sequence_id": sequence.id,
"code": "outgoing",
"default_location_src_id": src_location.id,
"allowed_location_ids": [(6, 0, [src_location.id])],
"default_location_dest_id": dest_location.id,
"allowed_dest_location_ids": [(6, 0, [dest_location.id])],
}
return data
@api.multi
def _prepare_production_fg_type(self):
self.ensure_one()
obj_sequence = self.env['ir.sequence']
dest_location = self.lot_stock_id
src_location = self._get_production_fg_location()
sequence = obj_sequence.create(
self._prepare_production_fg_sequence())
data = {
"name": _("Production Result"),
"warehouse_id": self.id,
"sequence_id": sequence.id,
"code": "incoming",
"default_location_src_id": src_location.id,
"allowed_location_ids": [(6, 0, [src_location.id])],
"default_location_dest_id": dest_location.id,
"allowed_dest_location_ids": [(6, 0, [dest_location.id])],
}
return data
@api.multi
def _get_production_rm_location(self):
self.ensure_one()
if not self.production_rm_loc_id:
raise UserError(_("No RM Consumption location"))
return self.production_rm_loc_id
@api.multi
def _get_production_fg_location(self):
self.ensure_one()
if not self.production_fg_loc_id:
raise UserError(_("No production result location"))
return self.production_fg_loc_id
@api.multi
def _create_production_rm_loc(self):
self.ensure_one()
obj_loc = self.env["stock.location"]
production_rm_loc = obj_loc.create(
self._prepare_production_rm_location())
return production_rm_loc
@api.multi
def _create_production_fg_loc(self):
self.ensure_one()
obj_loc = self.env["stock.location"]
production_fg_loc = obj_loc.create(
self._prepare_production_fg_location())
return production_fg_loc
@api.multi
def _create_production_rm_type(self):
self.ensure_one()
obj_type = self.env["stock.picking.type"]
production_rm_type = obj_type.create(
self._prepare_production_rm_type())
return production_rm_type
@api.multi
def _create_production_fg_type(self):
self.ensure_one()
obj_type = self.env["stock.picking.type"]
production_fg_type = obj_type.create(
self._prepare_production_fg_type())
return production_fg_type
@api.multi
def button_create_production_rm_loc(self):
for wh in self:
production_rm_loc = wh._create_production_rm_loc()
self.production_rm_loc_id = production_rm_loc.id
@api.multi
def button_create_production_fg_loc(self):
for wh in self:
production_fg_loc = wh._create_production_fg_loc()
self.production_fg_loc_id = production_fg_loc.id
@api.multi
def button_create_production_rm_type(self):
for wh in self:
production_rm_type = wh._create_production_rm_type()
self.production_rm_type_id = production_rm_type.id
@api.multi
def button_create_production_fg_type(self):
for wh in self:
production_fg_type = wh._create_production_fg_type()
self.production_fg_type_id = production_fg_type.id
@api.model
def create(self, values):
new_wh = super(StockWarehouse, self).create(values)
production_rm_loc = new_wh._create_production_rm_loc()
production_fg_loc = new_wh._create_production_fg_loc()
new_wh.production_rm_loc_id = production_rm_loc.id
new_wh.production_fg_loc_id = production_fg_loc.id
production_rm_type = new_wh._create_production_rm_type()
production_fg_type = new_wh._create_production_fg_type()
new_wh.production_rm_type_id = production_rm_type.id
new_wh.production_fg_type_id = production_fg_type.id
return new_wh
|
agpl-3.0
| 7,212,818,813,044,937,000
| 31.785
| 70
| 0.583804
| false
| 3.731929
| false
| false
| false
|
puruckertom/ubertool
|
ubertool/earthworm/earthworm_exe.py
|
1
|
2201
|
from __future__ import division
import pandas as pd
import os.path
import sys
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
from .earthworm_functions import EarthwormFunctions
class EarthwormInputs(ModelSharedInputs):
"""
Input class for Earthworm.
"""
def __init__(self):
"""Class representing the inputs for Earthworm"""
super(EarthwormInputs, self).__init__()
self.k_ow = pd.Series([], dtype="float")
self.l_f_e = pd.Series([], dtype="float")
self.c_s = pd.Series([], dtype="float")
self.k_d = pd.Series([], dtype="float")
self.p_s = pd.Series([], dtype="float")
class EarthwormOutputs(object):
"""
Output class for Earthworm.
"""
def __init__(self):
"""Class representing the outputs for Earthworm"""
super(EarthwormOutputs, self).__init__()
self.out_earthworm_fugacity = pd.Series(name="out_earthworm_fugacity")
class Earthworm(UberModel, EarthwormInputs, EarthwormOutputs, EarthwormFunctions):
"""
Earthworm model for annelid soil ingestion.
"""
def __init__(self, pd_obj, pd_obj_exp):
"""Class representing the Earthworm model and containing all its methods"""
super(Earthworm, self).__init__()
self.pd_obj = pd_obj
self.pd_obj_exp = pd_obj_exp
self.pd_obj_out = None
def execute_model(self):
"""
Callable to execute the running of the model:
1) Populate input parameters
2) Create output DataFrame to hold the model outputs
3) Run the model's methods to generate outputs
4) Fill the output DataFrame with the generated model outputs
"""
self.populate_inputs(self.pd_obj)
self.pd_obj_out = self.populate_outputs()
self.run_methods()
self.fill_output_dataframe()
# Begin model methods
def run_methods(self):
""" Execute all algorithm methods for model logic """
try:
self.earthworm_fugacity()
except Exception as e:
pass
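# Illustrative usage sketch (editor's addition, not part of ubertool): how the
# four-step flow documented in ``execute_model`` might be driven. The single
# input row and its values below are hypothetical.
def _example_run_earthworm():
    pd_obj = pd.DataFrame({
        "k_ow": [100.0], "l_f_e": [0.01], "c_s": [1.0],
        "k_d": [0.5], "p_s": [0.1],
    })
    model = Earthworm(pd_obj, pd_obj_exp=None)
    model.execute_model()
    return model.pd_obj_out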
|
unlicense
| 1,824,656,894,426,855,400
| 30.442857
| 87
| 0.624262
| false
| 3.656146
| false
| false
| false
|
defm03/toraeru
|
test/loli_gelbooru.py
|
1
|
3832
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
*booru general file.
For now, there's working Gelbooru downloader for loli content,
but soon I'll add danbooru, etc.
"""
import loli_spam
import os
import datetime
import urllib.request
import http.cookiejar
import xml.etree.ElementTree as eltree
import json
#loli_spam.execute_spam()
cache_dir = "cache/"
class Gelbooru(object):
"""docstring for Gelbooru"""
def __init__(self, url="http://gelbooru.com/"):
# gets gelbooru homepage by default
super(Gelbooru, self).__init__()
self.url = url
gelbooru_loli = urllib.request.urlopen(url,timeout=5)
read_gel_loli = gelbooru_loli.read()
# save to gel.html file
name_gel_loli = "gel.html"
file_gel_loli = open(cache_dir+name_gel_loli,"wb")
file_gel_loli.write(read_gel_loli)
def gel_rssatom(url="http://gelbooru.com/index.php?page=atom",
by_tag_loli = False,limit = 100,download = True):
"""gel_rssatom:
by_tag_loli:
If you want to get feed for tag 'loli', you need to switch
by_tag_loli to True.
limit:
limit is variable that stores maximum number of loli entries.
maximum number of entries that can be loaded is 100 (limited
by gelbooru API). When I was testing it, there was some problem
with loading less than 5-10 urls.
"""
if by_tag_loli == True:
url = "http://gelbooru.com/index.php?page=dapi&s=post&q=index&limit={0}&tags=loli".format(str(limit))
# gets gelbooru atom rss feed
gelbooru_atom = urllib.request.urlopen(url,timeout=5)
read_gel_atom = gelbooru_atom.read()
# save to atom.xml file
if by_tag_loli == True:
name_gel_atom = "atom_loli.xml"
else: name_gel_atom = "atom.xml"
file_gel_atom = open(cache_dir+name_gel_atom,"wb")
file_gel_atom.write(read_gel_atom)
# XML parsing
tree = eltree.parse(cache_dir+name_gel_atom)
root = tree.getroot()
# gets urls to images from post form
for imgurl in root.iter('post'):
url = imgurl.attrib.get('file_url')
print(url)
# gets picture file name
f_url = url.replace(url[0:37],"")
if download == True and os.path.exists(cache_dir+f_url) == False:
# if file is already downloaded, it will skip it
urllib.request.urlretrieve(url,cache_dir+f_url)
print(f_url)
class Danbooru(object):
"""docstring for Danbooru"""
def __init__(self, url="http://gelbooru.com/"):
super(Danbooru, self).__init__()
self.url = url
def get_time():
# datetime.datetime.now() method
now = datetime.datetime.now()
hour = datetime.time(now.hour)
minute = datetime.time(now.minute)
second = datetime.time(now.second)
# isoformat() >> str method
isotime = datetime.datetime.now().isoformat()
s_iso = str(isotime)
s_iso[0:9] = date
def dan_jsonGET(url="http://gelbooru.com/",tag="loli",limit=100):
# sends request to json API on danbooru and saves in variable 'json_r'
json_g = urllib.request.urlopen(url+"posts.json?limit={0}?search[tags]={1}".format(str(limit), tag))
json_r = json_g.read()
# opens file following new filename format, and writes json data to it
file_dan = open(cache_dir+"danbooru-"+date+"-T-"+str(hour)+"-"+str(minute)+"-"+str(second)+".json", "wb")
file_dan.write(json_r)
"""Filename new format:
example: danbooru-2013-10-08-T-19-11-12.json
1st place: Object name
2nd place: Date in iso format
3rd place: (starting with "-T-") Time: hour - minute - second
"""
def execute_gel(take_limit=100):
# auto get a page, and put into "gel.html" file
Gelbooru("http://gelbooru.com/index.php?page=post&s=list&tags=loli")
maigah = Gelbooru.gel_rssatom(by_tag_loli=True,limit=take_limit)
def execute_dan(take_limit=100):
# calls dan_jsonGET -> saving 100 entries with tag "loli"
# to file following format in Danbooru init()
omgomg = Danbooru.dan_jsonGET(tag="loli",limit=take_limit)
|
gpl-3.0
| -7,906,757,162,575,998,000
| 29.420635
| 108
| 0.679541
| false
| 2.708127
| false
| false
| false
|
mitsuhiko/sentry
|
src/sentry/models/environment.py
|
1
|
1364
|
"""
sentry.models.environment
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
BoundedPositiveIntegerField, Model, sane_repr
)
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5
class Environment(Model):
__core__ = False
project_id = BoundedPositiveIntegerField()
name = models.CharField(max_length=64)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_environment'
unique_together = (('project_id', 'name'),)
__repr__ = sane_repr('project_id', 'name')
@classmethod
def get_cache_key(cls, project_id, name):
return 'env:1:%s:%s' % (project_id, md5(name).hexdigest())
@classmethod
def get_or_create(cls, project, name):
name = name or ''
cache_key = cls.get_cache_key(project.id, name)
env = cache.get(cache_key)
if env is None:
env = cls.objects.get_or_create(
project_id=project.id,
name=name,
)[0]
cache.set(cache_key, env, 3600)
return env
|
bsd-3-clause
| 918,329,941,305,556,000
| 25.230769
| 75
| 0.621701
| false
| 3.696477
| false
| false
| false
|
MangoMangoDevelopment/neptune
|
lib/ros_comm-1.12.0/utilities/roswtf/src/roswtf/network.py
|
1
|
4458
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: environment.py 4428 2009-05-05 05:48:36Z jfaustwg $
import os
import socket
import stat
import string
import sys
import rosgraph
import rosgraph.network
from roswtf.rules import warning_rule, error_rule
# #1220
def ip_check(ctx):
# best we can do is compare roslib's routine against socket resolution and make sure they agree
local_addrs = rosgraph.network.get_local_addresses()
resolved_ips = [host[4][0] for host in socket.getaddrinfo(socket.gethostname(), 0, 0, 0, socket.SOL_TCP)]
global_ips = [ ip for ip in resolved_ips if not ip.startswith('127.') and not ip == '::1']
remote_ips = list(set(global_ips) - set(local_addrs))
if remote_ips:
retval = "Local hostname [%s] resolves to [%s], which does not appear to be a local IP address %s." % (socket.gethostname(), ','.join(remote_ips), str(local_addrs))
# IPv6 support % to denote zone/scope ids. The value is expanded
# in other functions, this is why we are using replace command in
# the return. For more info https://github.com/ros/ros_comm/pull/598
return retval.replace('%', '%%')
# suggestion by mquigley based on laptop dhcp issues
def ros_hostname_check(ctx):
"""Make sure that ROS_HOSTNAME resolves to a local IP address"""
if not rosgraph.ROS_HOSTNAME in ctx.env:
return
hostname = ctx.env[rosgraph.ROS_HOSTNAME]
try:
resolved_ips = [host[4][0] for host in socket.getaddrinfo(hostname, 0, 0, 0, socket.SOL_TCP)]
except socket.gaierror:
return "ROS_HOSTNAME [%s] cannot be resolved to an IP address"%(hostname)
# best we can do is compare roslib's routine against socket resolution and make sure they agree
local_addrs = rosgraph.network.get_local_addresses()
remote_ips = list(set(resolved_ips) - set(local_addrs))
if remote_ips:
return "ROS_HOSTNAME [%s] resolves to [%s], which does not appear to be a local IP address %s."%(hostname, ','.join(remote_ips), str(local_addrs))
def ros_ip_check(ctx):
"""Make sure that ROS_IP is a local IP address"""
if not rosgraph.ROS_IP in ctx.env:
return
ip = ctx.env[rosgraph.ROS_IP]
# best we can do is compare roslib's routine against socket resolution and make sure they agree
addrs = rosgraph.network.get_local_addresses()
if ip not in addrs:
return "ROS_IP [%s] does not appear to be a local IP address %s."%(ip, str(addrs))
# Error/Warning Rules
warnings = [
(ros_hostname_check,
"ROS_HOSTNAME may be incorrect: "),
(ros_ip_check,
"ROS_IP may be incorrect: "),
]
errors = [
(ip_check,
"Local network configuration is invalid: "),
]
def wtf_check(ctx):
for r in warnings:
warning_rule(r, r[0](ctx), ctx)
for r in errors:
error_rule(r, r[0](ctx), ctx)
|
bsd-3-clause
| 3,243,990,153,839,998,500
| 38.105263
| 172
| 0.700987
| false
| 3.743073
| false
| false
| false
|
yro/openveda
|
openveda/reporting.py
|
1
|
2431
|
import os
import sys
"""
Quick and dirty error handling & logging
"""
from global_vars import *
class ErrorObject(object):
"""
Unspecified errors with a message
"""
@staticmethod
def print_error(message):
decorator = "***************E*R*R*O*R*******************"
outgoing = '\n%s \n\n%s \n\n%s\n' % (
NODE_COLORS_BLUE + decorator + NODE_COLORS_END,
message,
NODE_COLORS_BLUE + decorator + NODE_COLORS_END,
)
print outgoing
class Output(object):
"""
Various reporting methods
"""
@staticmethod
def _seconds_from_string(duration):
hours = float(duration.split(':')[0])
minutes = float(duration.split(':')[1])
seconds = float(duration.split(':')[2])
duration_seconds = (((hours * 60) + minutes) * 60) + seconds
return duration_seconds
@staticmethod
def status_bar(process):
"""
A terminal status bar thing
"""
fps = None
duration = None
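        # Parse the transcoder's console output (ffmpeg-style, assumed): fps comes
        # from the "Stream #... Video:" header, total duration from the "Duration:"
        # line, and the running "frame=" lines are used to estimate percent complete.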
while True:
line = process.stdout.readline().strip()
if line == '' and process.poll() is not None:
break
if fps == None or duration == None:
if "Stream #" in line and " Video: " in line:
fps = [s for s in line.split(',') if "fps" in s][0].strip(' fps')
if "Duration: " in line:
dur = line.split('Duration: ')[1].split(',')[0].strip()
duration = Output()._seconds_from_string(duration=dur)
else:
if 'frame=' in line:
cur_frame = line.split('frame=')[1].split('fps=')[0].strip()
end_frame = float(duration) * float(fps.strip())
pctg = (float(cur_frame) / float(end_frame))
sys.stdout.write('\r')
i = int(pctg * 20.0)
sys.stdout.write("%s : [%-20s] %d%%" % ('Transcode', '='*i, int(pctg * 100)))
sys.stdout.flush()
"""
Just for politeness
"""
sys.stdout.write('\r')
sys.stdout.write("%s : [%-20s] %d%%" % ('Transcode', '='*20, 100))
sys.stdout.flush()
def main():
test_error = "This is a test"
ErrorObject.print_error(
message = test_error,
)
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
| 3,754,043,802,549,835,000
| 27.267442
| 97
| 0.48334
| false
| 3.895833
| false
| false
| false
|
migumar2/uiHRDC
|
uiHRDC/benchmark/report/summaryTables/utils-py/generateSelfIdx.py
|
1
|
3853
|
from sys import argv
from mytiming import *
from mycheckfiles import *
##--main --------------------------------------------------------------##
variants=[]
variants.append( ["WCSA" ,"../../../self-indexes/collectResults/wcsa" ,"B-LOG.dat" , "S-LOG.dat" , "N.Wa_swcsa.dat" , "N.Wb_swcsa.dat" , "N.P2_swcsa.dat", "N.P5_swcsa.dat" ,"e.Words20_swcsa.dat" , "e.Words3000_swcsa.dat" ] )
variants.append( ["RLCSA" ,"../../../self-indexes/collectResults/rlcsa" ,"B-LOG.dat" , "S-LOG.dat" , "Wa_rlcsa" , "Wb_rlcsa" , "P2_rlcsa" , "P5_rlcsa" ,"e80_rlcsa" , "e13000_rlcsa" ] )
variants.append( ["SLP" ,"../../../self-indexes/collectResults/slp" ,"B-LOG.dat" , "S-LOG.dat" , "slp.f1_1000" , "slp.f1001_100k" , "slp.2_2" , "slp.5_5" ,"slp.snippets80" , "slp.snippets13000" ] )
variants.append( ["WSLP" ,"../../../self-indexes/collectResults/wslp" ,"B-LOG.dat" , "S-LOG.dat" , "wslp.f1_1000" , "wslp.f1001_100k" , "wslp.2_2" , "wslp.5_5" ,"wslp.snippets80" , "wslp.snippets13000" ] )
variants.append( ["LZ77-Index" ,"../../../self-indexes/collectResults/lz77" ,"B-LOG.dat" , "S-LOG.dat" , "lz77.f1_1000" , "lz77.f1001_100k" , "lz77.2_2" , "lz77.5_5" ,"lz77.snippets80" , "lz77.snippets13000" ] )
variants.append( ["LZEnd-Index" ,"../../../self-indexes/collectResults/lzend" ,"B-LOG.dat" , "S-LOG.dat" , "lzend.f1_1000" , "lzend.f1001_100k" , "lzend.2_2" , "lzend.5_5" ,"lzend.snippets80" , "lzend.snippets13000" ] )
#src=vbytePos[0:10] #src is a COPY of the list (which remains un-modified)
#src=rice[0:8] #src is a COPY of the list (which remains un-modified)
#src=riceB[0:8] #src is a COPY of the list (which remains un-modified)
header= r"""
%%%% STATIC HEADER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htbp]
\scriptsize
\centering
\begin{tabular}{|l|r|r|c|c|c|c|c|c|}
\cline{2-9} \multicolumn{1}{r|}{} & \multicolumn{2}{c|}{Overall Time} & \multicolumn{4}{c|}{Locate} &\multicolumn{2}{c|}{Extract} \\
\cline{2-9} \multicolumn{1}{r|}{} & \multicolumn{2}{c|}{ } & \multicolumn{2}{c|}{Words} & \multicolumn{2}{c|}{Phrases} & 80 & 13,000 \\
\cline{2-9} \multicolumn{1}{r|}{} & Building & Querying & {Low freq} & {High freq} & {2-words} & {5-words} & chars & chars \\
\hline
\hline
%%%% CONTENTS GENERATED BY SCRIPT %%%%%%%%%%%%%%%%%%%%%%%%%%"""
footer= r"""%%%% STATIC FOOTER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\cline{1-3}
\end{tabular}%
\caption{Summary and state of the experiments run on the test machine: self-indexes.}
\label{ap1:self}%
\end{table}%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
if len(argv) !=2:
print "Incorrect syntax! You must provide an output .tex filename"
print " use: python %s <filename.tex>" % argv[0]
exit(0)
filename= argv[1]
deleteFileIfExists(filename)
#print header
addDataToLog(filename, header)
#processes all the techniques in "variants"
for t in variants:
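	# slice to get a copy of the variant list, so padding it with "none" below
	# does not modify the original entry in variants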
src=t[0:len(t)]
if len(src)==8:
src.append("none") #no extract80-file
src.append("none") #no extract13000-file
strresult= getStrIIResultIdx(src)
#print strresult
addDataToLog(filename, strresult)
str=getElapsedTime("../../../self-indexes/SELF-LOG.dat")
str=str.rjust(13)+" "
overall=r""" \hline
\textbf{OVERALL TIME } & \multicolumn{2}{|c|}{""" + str + r""" } &\multicolumn{4}{|r}{} \\"""
#print overall
addDataToLog(filename, overall)
#print footer
addDataToLog(filename, footer)
|
lgpl-2.1
| -2,661,403,691,565,364,000
| 35.695238
| 234
| 0.523748
| false
| 2.543234
| false
| false
| false
|
USGS-EROS/lcmap-changes
|
bin/pyccd_inputs.py
|
1
|
2416
|
#!/usr/bin/env python3
import argparse
import requests
import logging
import sys
__format = '%(asctime)s %(module)-10s::%(funcName)-20s - [%(lineno)-3d]%(message)s'
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format=__format,
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def get_chip_specs(host, port):
""" Returns all chip specs from the named host and port for pyccd"""
query = ''.join(['(((red OR blue OR green OR swir1 OR swir2 OR nir) AND sr)', ' ',
'OR (toa AND thermal AND NOT tirs2)', ' ',
'OR (cfmask AND NOT conf))', ' ',
#'AND NOT LANDSAT_8'])
])
chip_specs=''.join(['http://', host, ':', port, '/landsat/chip-specs?q=', query])
logger.debug("chip_specs url: {}".format(chip_specs))
return requests.get(chip_specs).json()
def get_ubids(chip_specs):
""" Return all ubids from supplied chip-specs """
return [ts['ubid'] for ts in chip_specs]
def url_template(ubids, start_date, end_date='{{now}}', host='localhost', port='80'):
""" Returns the inputs url template to be fed into algorithms configuration """
# TODO: gonna have to deal with the context path being different for local vs deployed
# /landsat here, probably / locally
base = ''.join(['http://', host, ':', port,
'/landsat/chips?x={{x}}&y={{y}}',
'&acquired=', start_date, '/', end_date])
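    # each ubid is appended to the base URL as its own &ubid=<value> query parameter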
ubids = ''.join(['&ubid={}'.format(u) for u in ubids])
return ''.join([base, ubids])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--host", action="store", help="host for lcmap-landsat api")
parser.add_argument("--port", action="store", help="port for lcmap-landsat api", default="80")
parser.add_argument("--start", action="store", help="start date for data query YYYY-MM-DD")
parser.add_argument("--end", action="store", help="end date for data query YYYY-MM-DD", default="{{now}}")
args = parser.parse_args()
if len(sys.argv) < 2 or not (args.host and args.start):
parser.print_usage()
sys.exit(1)
else:
print(url_template(sorted(list(set(get_ubids(get_chip_specs(args.host, args.port))))),
args.start, args.end, args.host, args.port))
|
unlicense
| -243,910,291,986,523,200
| 44.584906
| 114
| 0.57947
| false
| 3.532164
| false
| false
| false
|
crichardson17/starburst_atlas
|
HighResSims/Baseline_DustFree_Hires_cut17/Baseline_dustfree_plotter.py
|
1
|
12507
|
############################################################
############# Plotting File for Contour Plots ##############
################## Data read from Cloudy ###################
################ Helen Meskhidze, Fall 2015 ################
#################### Elon University #######################
#------------------------------------------------------------------------------------------------------
'''
The inputs this code takes are .grd and .txt files from Cloudy.
It can take in as many input files (in case you have a grid and haven't concatenated all the files)- just change the numFiles value
This code outputs a set of contour plots, saved to the working directory
'''
#------------------------------------------------------------------------------------------------------
#Packages importing
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
import time
# ------------------------------------------------------------------------------------------------------
# keep track of how long the code takes to run
t0 = time.clock()
headerloc = "/Users/helen/Documents/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 6 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("{:d}.grd".format(i+1)):
gridFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
if file.endswith("{:d}.txt".format(i+1)):
emissionFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
print("Files names constructed")
# ------------------------------------------------------------------------------------------------------
#Patches data
#this section adds the rectangles on the plots of the three other studies
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.)] # ignored
codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY]
path = Path(verts, codes)
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.)] # ignored
path = Path(verts, codes)
path2 = Path(verts2, codes)
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.)] # ignored
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the patches routine: to add patches for others peoples' data onto our plots.
#Adds patches to the first subplot
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='blue', lw=0)
patch = patches.PathPatch(path, facecolor='grey', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
plt.figure(figsize=(13,10))
def add_sub_plot(sub_num, elinesplot):
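	# Draw one of the 16 emission-line panels: black contour lines over a red
	# intensity map, with the peak value marked by a star and annotated.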
numplots = 16
plt.subplot(numplots/4.,4,sub_num) #row, column
#choose which z array, then which subplot
z_subnum = z_total[elinesplot]
z_line = z_subnum[:,:,sub_num-1]
contour1 = plt.contour(x_axis, y_axis, z_line, levels, colors='k', origin='lower', extent=extent) #teal contours, dashed
contourmap = plt.imshow(z_line, cmap='Reds', extent= extent, aspect = "auto",origin='lower', vmin=0, vmax =4)
plt.scatter(max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[elinesplot][sub_num-1]], xy=(8,11), xytext=(4,8.5), fontsize = 10)
plt.annotate(max_values[line[elinesplot][sub_num-1],0], xy = (max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == 4:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.5,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 8:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 12:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 0:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
#axis limits
yt_min = 8 ; yt_max = 17; xt_min = 0; xt_max = 10
plt.ylim(yt_min,yt_max); plt.xlim(xt_min,xt_max)
#ticks
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num == 0:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 12:
plt.tick_params(labelbottom = 'off')
if sub_num%(numplots/4) == 1:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
else:
plt.tick_params(labelleft = 'off')
if sub_num > 12:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
#to print progress to the terminal
if sub_num == numplots/2:
print("half the sub-plots of plot{:d} are complete".format(elinesplot+1))
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
# ---------------------------------------------------
#To fix when hdens > 10
#many of my grids were run off with hdens up to 12 so we needed to cut off part of the data
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 10.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
#these are the emission line names properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#select the scaling factor
#for 4860
incidentnum = 58 #reference index of 4860
incidentline = 4860. #wavelength
incident = Emissionlines[:,58]
print("Scaling data")
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
	for j in range(len(Emissionlines[0])):
		if math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10) > 0:
			concatenated_data[i,j] = math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10)
		else:
			concatenated_data[i,j] = 0
print("Finding peaks")
#find the maxima (having cut the arrays already) to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print("Data arranged")
# ---------------------------------------------------
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines to plot here! indexes of desired lines
line = [
#UV1Lines
[0, 1, 2, 3, 5, 165, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
#977, 991, 1026, 1216, 1218, 1239, 1240, 1243, 1263, 1304, 1308, 1397, 1402, 1406, 1486, 1531
#UV2line
[16, 17, 18, 19, 20, 21, 23, 24, 25, 27, 29, 30,31, 32, 33, 34],
#1549, 1640, 1665, 1671, 1750, 1860, 1888, 1907, 2297, 2321, 2471, 2326, 2335, 2665, 2798
#Optical Lines
[36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52],
#NE 3 3343A, NE 5 3426, 3646, 3726, 3727, 3729, 3869, 3889, 3933, 4026, 4070, 4074, 4078, 4102, 4340, 4363
#Optical Lines 2
[53, 55, 56, 57, 59, 60, 61, 64, 65, 66, 67, 68, 69, 70, 71, 73],
#NE 4 4720A, AR 4 4740, 4861, O III 4959, O 3 5007, O 1 5577, N 2 5755, HE 1 5876, O 1 6300;
#S 3 6312, O 1 6363, H 1 6563, N 2 6584, S II 6716, S 2 6720, S II 6731
#IR Lines
[75, 76, 77, 78, 79, 80, 81, 82, 84, 83, 85, 86, 87, 88, 89, 90],
#AR 5 7005A, AR 3 7135A, TOTL 7325A, AR 3 7751, 6LEV 8446, CA2X 8498, CA2Y 8542, CA2Z 8662;
#CA 2 8579A, S 3 9069, H 1 9229, S 3 9532... H 1 9546
#More Lines
[97,112, 107, 110, 108, 111, 106, 109, 104, 101, 102, 105, 99, 103, 98, 100],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
]
# ---------------------------------------------------
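#grid dimensions: Nx = number of hdens (x) samples per phi value, Ny = number of phi (y) samples per hdens value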
Nx = len(np.where(y == y[0])[0])
Ny = len(np.where(x == x[0])[0])
x_axis = x[0:Nx]
y_axis = np.unique(y)
extent = [min(x_axis),max(x_axis),min(y_axis),max(y_axis)]
# ---------------------------------------------------
z_total = [None] * (len(line)-1)
#create z array for this plot
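#each entry of line holds 16 emission-line indices; reshape the flat (Ny*Nx, 16)
#slice into a (Ny, Nx, 16) cube so every line becomes a 2D grid over the (hdens, phi) plane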
for i in range(len(z_total)):
zi1 = [concatenated_data[:,line[i]]]
zi2 = np.reshape(zi1,(Ny,Nx,16))
z_total[i] = zi2
# ---------------------------------------------------
#plotting features (and contour levels)
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
#levels = arange(10**-1,10, .2) #teal levels
levels = arange(10**-2,10**2, 1) #black levels
# ---------------------------------------------------
#loop through desired plots and desired subplots
print("Beginning plotting")
plt.clf()
for j in range (len(z_total)):
for i in range(16):
add_sub_plot(i,j)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
plt.savefig(("Full_lines_%d.pdf")%j)
print("plot {:d} complete".format(j+1))
plt.clf()
if (time.clock() - t0) > 120:
print((time.clock() - t0)/60., "minutes process time")
else:
print(time.clock() - t0, "seconds process time")
|
gpl-2.0
| 782,418,356,838,361,500
| 38.206897
| 240
| 0.595986
| false
| 2.876495
| false
| false
| false
|
suutari/shoop
|
shuup/front/checkout/_services.py
|
1
|
2897
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import abc
import six
from shuup.apps.provides import get_provide_objects
from shuup.core.models import ServiceProvider
from ._view_mixin import CheckoutPhaseViewMixin
class ServiceCheckoutPhaseProvider(six.with_metaclass(abc.ABCMeta)):
"""
Interface for providing checkout phase for a service.
Items specified in ``front_service_checkout_phase_provider`` provide
category should implement this interface.
"""
@abc.abstractmethod
def get_checkout_phase(self, checkout_process, service):
"""
Get checkout phase for given service.
If this provider is for another service, then the return value
will be None.
:type checkout_process: shuup.front.checkout.CheckoutProcess
:type service: shuup.core.models.Service
:rtype: shuup.front.checkout.CheckoutPhaseViewMixin|None
"""
pass
class BasicServiceCheckoutPhaseProvider(ServiceCheckoutPhaseProvider):
"""
Helper for implementing basic ServiceCheckoutPhaseProvider.
This helper should be useful for most cases, where one only has to
provide a checkout phase for certain service provider type just by
initializing some predefined class.
"""
phase_class = None # override in subclass
service_provider_class = None # override in subclass
def get_checkout_phase(self, checkout_process, service):
"""
Get checkout phase for given service.
:type checkout_process: shuup.front.checkout.CheckoutProcess
:type service: shuup.core.models.Service
:rtype: shuup.front.checkout.CheckoutPhaseViewMixin|None
"""
assert issubclass(self.phase_class, CheckoutPhaseViewMixin)
assert issubclass(self.service_provider_class, ServiceProvider)
if isinstance(service.provider, self.service_provider_class):
return checkout_process.instantiate_phase_class(
self.phase_class, service=service)
return None
def get_checkout_phases_for_service(checkout_process, service):
"""
Get checkout phases for given service.
:type checkout_process: shuup.front.checkout.CheckoutProcess
:type service: shuup.core.models.Service
:rtype: Iterable[shuup.front.checkout.CheckoutPhaseViewMixin]
"""
classes = get_provide_objects("front_service_checkout_phase_provider")
for provider_cls in classes:
provider = provider_cls()
assert isinstance(provider, ServiceCheckoutPhaseProvider)
phase = provider.get_checkout_phase(checkout_process, service)
if phase:
assert isinstance(phase, CheckoutPhaseViewMixin)
yield phase
|
agpl-3.0
| 6,591,927,049,728,485,000
| 33.903614
| 74
| 0.714187
| false
| 4.260294
| false
| false
| false
|
saullocastro/pyNastran
|
pyNastran/converters/dev/obj/obj_reader.py
|
1
|
3015
|
from __future__ import print_function
from numpy import array, unique, hstack, zeros
class OBJ(object):
def __init__(self):
pass
def read_obj(self, obj_filename):
"""
v -0.0817245 0.000635 0.00421862
v -0.0817245 0.000580371 0.00421862
v -0.0817245 -0.000635 0.00421862
l 1 2
l 2 3
"""
nodes = []
lines = []
#faces = []
with open(obj_filename, 'r') as obj_file:
            for line in obj_file.readlines():
sline = line.strip().split()
#print(sline)
Type = sline[0]
if Type == 'v': # vertex
nodes.append(sline[1:])
elif Type == 'l': # line
lines.append(sline[1:])
#elif Type == 'vt': # texture coordinate
#lines.append(sline[1:])
#elif Type == 'vn': # normal vector (not unit vector)
#lines.append(sline[1:])
#elif Type == 'vp': # parameter space vertex
#lines.append(sline[1:])
else:
raise NotImplementedError(sline)
self.nodes = array(nodes, dtype='float64')
# make it 0-based instead of 1 based
self.lines = array(lines, dtype='int32') - 1
self.make_elements()
def make_elements(self):
#print(self.nodes.shape)
unodes, indicies = unique_rows(self.nodes, return_inverse=True)
#print(unodes)
#print(list(indicies))
#print(unodes.shape)
#print(indicies.shape)
n1 = self.lines[:, 0]
n2 = self.lines[:, 1]
i1 = indicies[n1]
i2 = indicies[n2]
nrows = len(i1)
#self.lines = hstack([i1, i2], dtype='int32')
self.lines = hstack([i1, i2])
lines2 = zeros((nrows, 2), dtype='int32')
lines2[:, 0] = i1
lines2[:, 1] = i2
self.lines = lines2
#print(self.lines.shape)
self.nodes = unodes
def write_obj(self, obj_filename):
float_fmt = '8.6f'
int_fmt = 'i'
node_fmt = 'v %%%s %%%s %%%s\n' % (float_fmt, float_fmt, float_fmt)
line_fmt = 'l %%%s %%%s\n' % (int_fmt, int_fmt)
#print(node_fmt)
with open(obj_filename, 'wb') as obj_file:
for node in self.nodes:
obj_file.write(node_fmt % tuple(node))
for line in self.lines + 1:
obj_file.write(line_fmt % tuple(line))
def unique_rows(data, return_inverse=False):
ncols = data.shape[1]
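    # view each row as a single structured element so numpy.unique can
    # compare and deduplicate whole rows at once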
dtype = data.dtype.descr * ncols
struct = data.view(dtype)
uniq, indicies = unique(struct, return_inverse=return_inverse)
uniq = uniq.view(data.dtype).reshape(-1, ncols)
return uniq, indicies
def main(): # pragma: no cover
obj_filename = '6.5e-06_edges.txt'
obj = OBJ()
obj.read_obj(obj_filename)
obj.write_obj('b.txt')
if __name__ == '__main__': # pragma: no cover
main()
|
lgpl-3.0
| -4,943,026,172,035,429,000
| 30.082474
| 76
| 0.511443
| false
| 3.364955
| false
| false
| false
|
pombredanne/invenio
|
modules/bibmerge/lib/bibmerge_engine.py
|
1
|
17312
|
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibMerge Engine."""
import os
from invenio.bibmerge_merger import merge_field_group, replace_field, \
add_field, delete_field, merge_field, \
add_subfield, replace_subfield, \
delete_subfield, copy_R2_to_R1, merge_record
from invenio.search_engine import print_record, perform_request_search, \
get_fieldvalues
from invenio.bibedit_utils import cache_exists, cache_expired, \
create_cache_file, delete_cache_file, get_cache_file_contents, \
get_cache_mtime, latest_record_revision, record_locked_by_other_user, \
record_locked_by_queue, save_xml_record, touch_cache_file, \
update_cache_file_contents, _get_file_path, \
get_record_revision_ids, revision_format_valid_p, split_revid, \
get_marcxml_of_revision_id
from invenio.htmlutils import remove_html_markup
from invenio.search_engine import record_exists
from invenio.bibrecord import create_record, record_xml_output, record_add_field
from invenio.bibedit_config import CFG_BIBEDIT_TO_MERGE_SUFFIX
import invenio.template
bibmerge_templates = invenio.template.load('bibmerge')
def perform_request_init():
"""Handle the initial request.
"""
errors = []
warnings = []
body = ''
# Build page structure and control panel.
body += bibmerge_templates.controlpanel()
body += """
<div id="bibMergeContent">
</div>"""
return body, errors, warnings
def perform_request_ajax(req, uid, data):
"""Ajax request dispatcher.\
"""
requestType = data['requestType']
if requestType in ('getRecordCompare', 'submit', 'cancel', 'recCopy', \
'recMerge', 'recMergeNC'):
return perform_request_record(requestType, uid, data)
elif requestType in ('getFieldGroup', 'getFieldGroupDiff', \
'mergeFieldGroup', 'mergeNCFieldGroup', 'replaceField', 'addField', \
'deleteField', 'mergeField'):
return perform_request_update_record(requestType, uid, data)
elif requestType in ('deleteSubfield', 'addSubfield', 'replaceSubfield', \
'diffSubfield'):
return perform_small_request_update_record(requestType, uid, data)
elif requestType == "searchCandidates" or requestType == "searchRevisions":
return perform_candidate_record_search(requestType, data)
else:
return { 'resultCode': 1, 'resultText': 'Error unknown' }
def perform_candidate_record_search(requestType, data):
"""Handle search requests.
"""
max_results = 999
too_many = False
result = {
'resultCode': 0,
'resultText': ''
}
if requestType == "searchCandidates":
recids = perform_request_search( p=data['query'] )
if len(recids) > max_results:
too_many = True
else:
captions = [ search_result_info(x) for x in recids ]
alternative_titles = [ remove_html_markup(print_record(x, "hs")) for x in recids ]
search_results = [recids, captions, alternative_titles]
elif requestType == "searchRevisions":
revisions = get_record_revision_ids( data['recID1'] )
captions = [ split_revid(x, 'datetext')[1] for x in revisions ]
search_results = [revisions, captions]
if too_many == True:
result['resultCode'] = 1
result['resultText'] = 'Too many results'
else:
result['results'] = search_results
result['resultText'] = '%s results' % len(search_results[0])
return result
def search_result_info(recid):
"""Return report number of a record or if it doen't exist return the recid
itself.
"""
report_numbers = get_fieldvalues(recid, '037__a')
if len(report_numbers) == 0:
return "#"+str(recid)
else:
return report_numbers[0]
def perform_request_record(requestType, uid, data):
"""Handle 'major' record related requests.
Handle retrieving, submitting or cancelling the merging session.
"""
#TODO add checks before submission and cancel, replace get_bibrecord call
result = {
'resultCode': 0,
'resultText': ''
}
recid1 = data["recID1"]
record1 = _get_record(recid1, uid, result)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'submit':
if data.has_key('duplicate'):
recid2 = data['duplicate']
record2 = _get_record_slave(recid2, result, 'recid', uid)
if result['resultCode'] != 0: #return in case of error
return result
# mark record2 as deleted
record_add_field(record2, '980', ' ', ' ', '', [('c', 'DELETED')])
# mark record2 as duplicate of record1
record_add_field(record2, '970', ' ', ' ', '', [('d', str(recid1))])
#submit record2
xml_record = record_xml_output(record2)
save_xml_record(recid2, uid, xml_record)
#submit record1
save_xml_record(recid1, uid)
result['resultText'] = 'Record submitted'
return result
elif requestType == 'cancel':
delete_cache_file(recid1, uid)
result['resultText'] = 'Cancelled'
return result
recid2 = data["recID2"]
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'getRecordCompare':
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records compared'
elif requestType == 'recCopy':
copy_R2_to_R1(record1, record2)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Record copied'
elif requestType == 'recMerge':
merge_record(record1, record2, merge_conflicting_fields=True)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records merged'
elif requestType == 'recMergeNC':
merge_record(record1, record2, merge_conflicting_fields=False)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records merged'
else:
result['resultCode'], result['resultText'] = 1, 'Wrong request type'
return result
def perform_request_update_record(requestType, uid, data):
"""Handle record update requests for actions on a field level.
Handle merging, adding, or replacing of fields.
"""
result = {
'resultCode': 0,
'resultText': ''
}
recid1 = data["recID1"]
recid2 = data["recID2"]
record_content = get_cache_file_contents(recid1, uid)
cache_dirty = record_content[0]
rec_revision = record_content[1]
record1 = record_content[2]
pending_changes = record_content[3]
disabled_hp_changes = record_content[4]
# We will not be able to Undo/Redo correctly after any modifications
# from the level of bibmerge are performed ! We clear all the undo/redo
# lists
undo_list = []
redo_list = []
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'getFieldGroup':
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'])
result['resultText'] = 'Field group retrieved'
return result
elif requestType == 'getFieldGroupDiff':
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'], True)
result['resultText'] = 'Fields compared'
return result
elif requestType == 'mergeFieldGroup' or requestType == 'mergeNCFieldGroup':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
if requestType == 'mergeNCFieldGroup':
merge_field_group(record1, record2, fnum, ind1, ind2, False)
else:
merge_field_group(record1, record2, fnum, ind1, ind2, True)
resultText = 'Field group merged'
elif requestType == 'replaceField' or requestType == 'addField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
findex2 = _field_info( data['fieldCode2'] )[1]
if findex2 == None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
if requestType == 'replaceField':
replace_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field replaced'
else: # requestType == 'addField'
add_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field added'
elif requestType == 'deleteField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
if findex1 == None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
delete_field(record1, fnum, findex1)
resultText = 'Field deleted'
elif requestType == 'mergeField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
findex2 = _field_info( data['fieldCode2'] )[1]
if findex2 == None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
merge_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field merged'
else:
result['resultCode'], result['resultText'] = 1, 'Wrong request type'
return result
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'])
result['resultText'] = resultText
update_cache_file_contents(recid1, uid, rec_revision, record1, pending_changes, disabled_hp_changes, undo_list, redo_list)
return result
def perform_small_request_update_record(requestType, uid, data):
"""Handle record update requests for actions on a subfield level.
Handle adding, replacing or deleting of subfields.
"""
result = {
'resultCode': 0,
'resultText': '',
'resultHtml': ''
}
recid1 = data["recID1"]
recid2 = data["recID2"]
cache_content = get_cache_file_contents(recid1, uid) #TODO: check mtime, existence
cache_dirty = cache_content[0]
rec_revision = cache_content[1]
record1 = cache_content[2]
pending_changes = cache_content[3]
disabled_hp_changes = cache_content[4]
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
ftag, findex1 = _field_info(data['fieldCode1'])
fnum = ftag[:3]
findex2 = _field_info(data['fieldCode2'])[1]
sfindex1 = data['sfindex1']
sfindex2 = data['sfindex2']
if requestType == 'deleteSubfield':
delete_subfield(record1, fnum, findex1, sfindex1)
result['resultText'] = 'Subfield deleted'
elif requestType == 'addSubfield':
add_subfield(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfield added'
elif requestType == 'replaceSubfield':
replace_subfield(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfield replaced'
elif requestType == 'diffSubfield':
result['resultHtml'] = bibmerge_templates.BM_html_subfield_row_diffed(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfields diffed'
update_cache_file_contents(recid1, uid, rec_revision, record1, pending_changes, disabled_hp_changes, [], [])
return result
def _get_record(recid, uid, result, fresh_record=False):
"""Retrieve record structure.
"""
record = None
mtime = None
cache_dirty = None
record_status = record_exists(recid)
existing_cache = cache_exists(recid, uid)
if record_status == 0:
result['resultCode'], result['resultText'] = 1, 'Non-existent record: %s' % recid
elif record_status == -1:
result['resultCode'], result['resultText'] = 1, 'Deleted record: %s' % recid
elif not existing_cache and record_locked_by_other_user(recid, uid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by user' % recid
elif existing_cache and cache_expired(recid, uid) and \
record_locked_by_other_user(recid, uid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by user' % recid
elif record_locked_by_queue(recid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by queue' % recid
else:
if fresh_record:
delete_cache_file(recid, uid)
existing_cache = False
if not existing_cache:
record_revision, record = create_cache_file(recid, uid)
mtime = get_cache_mtime(recid, uid)
cache_dirty = False
else:
tmpRes = get_cache_file_contents(recid, uid)
cache_dirty, record_revision, record = tmpRes[0], tmpRes[1], tmpRes[2]
touch_cache_file(recid, uid)
mtime = get_cache_mtime(recid, uid)
if not latest_record_revision(recid, record_revision):
result['cacheOutdated'] = True
result['resultCode'], result['resultText'], result['cacheDirty'], result['cacheMTime'] = 0, 'Record OK', cache_dirty, mtime
return record
def _get_record_slave(recid, result, mode=None, uid=None):
"""Check if record exists and return it in dictionary format.
If any kind of error occurs returns None.
If mode=='revision' then recid parameter is considered as revid."""
record = None
if recid == 'none':
mode = 'none'
if mode == 'recid':
record_status = record_exists(recid)
#check for errors
if record_status == 0:
result['resultCode'], result['resultText'] = 1, 'Non-existent record: %s' % recid
elif record_status == -1:
result['resultCode'], result['resultText'] = 1, 'Deleted record: %s' % recid
elif record_locked_by_queue(recid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by queue' % recid
else:
record = create_record( print_record(recid, 'xm') )[0]
elif mode == 'tmpfile':
file_path = '%s_%s.xml' % (_get_file_path(recid, uid),
CFG_BIBEDIT_TO_MERGE_SUFFIX)
if not os.path.isfile(file_path): #check if file doesn't exist
            result['resultCode'], result['resultText'] = 1, 'Temporary file does not exist'
else: #open file
tmpfile = open(file_path, 'r')
record = create_record( tmpfile.read() )[0]
tmpfile.close()
elif mode == 'revision':
if revision_format_valid_p(recid):
marcxml = get_marcxml_of_revision_id(recid)
if marcxml:
record = create_record(marcxml)[0]
else:
result['resultCode'], result['resultText'] = 1, 'The specified revision does not exist'
else:
result['resultCode'], result['resultText'] = 1, 'Invalid revision id'
elif mode == 'none':
return {}
else:
result['resultCode'], result['resultText'] = 1, 'Invalid record mode for record2'
return record
def _field_info(fieldIdCode):
"""Returns a tuple: (field-tag, field-index)
eg.: _field_info('R1-8560_-2') --> ('8560_', 2) """
info = fieldIdCode.split('-')
if info[2] == 'None':
info[2] = None
else:
info[2] = int(info[2])
return tuple( info[1:] )
def _fieldtagNum_and_indicators(fieldTag):
"""Separate a 5-char field tag to a 3-character field-tag number and two
indicators"""
fnum, ind1, ind2 = fieldTag[:3], fieldTag[3], fieldTag[4]
if ind1 == '_':
ind1 = ' '
if ind2 == '_':
ind2 = ' '
return (fnum, ind1, ind2)
|
gpl-2.0
| -5,580,802,279,621,333,000
| 39.734118
| 139
| 0.628524
| false
| 3.735059
| false
| false
| false
|
mbohlool/client-python
|
kubernetes/client/models/v1beta2_deployment_strategy.py
|
1
|
4295
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2DeploymentStrategy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'rolling_update': 'V1beta2RollingUpdateDeployment',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None):
"""
V1beta2DeploymentStrategy - a model defined in Swagger
"""
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
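        # Example (hypothetical values; V1beta2RollingUpdateDeployment kwargs assumed):
        #     V1beta2DeploymentStrategy(
        #         type='RollingUpdate',
        #         rolling_update=V1beta2RollingUpdateDeployment(max_surge=1, max_unavailable=0))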
@property
def rolling_update(self):
"""
Gets the rolling_update of this V1beta2DeploymentStrategy.
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.
:return: The rolling_update of this V1beta2DeploymentStrategy.
:rtype: V1beta2RollingUpdateDeployment
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""
Sets the rolling_update of this V1beta2DeploymentStrategy.
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.
:param rolling_update: The rolling_update of this V1beta2DeploymentStrategy.
:type: V1beta2RollingUpdateDeployment
"""
self._rolling_update = rolling_update
@property
def type(self):
"""
Gets the type of this V1beta2DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.
:return: The type of this V1beta2DeploymentStrategy.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1beta2DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.
:param type: The type of this V1beta2DeploymentStrategy.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2DeploymentStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| -6,132,782,709,722,817,000
| 26.88961
| 105
| 0.570664
| false
| 4.265144
| false
| false
| false
|
gedhe/sidesa2.0
|
kejadian_lain.py
|
1
|
28823
|
#Boa:Frame:kejadian_lain
import os
import wx
import wx.lib.buttons
import data_penduduk
import sqlite3
import string
import gettext
import peringatan
db = sqlite3.connect('/opt/sidesa/sidesa')
cur = db.cursor()
def create(parent):
return kejadian_lain(parent)
[wxID_KEJADIAN_LAIN, wxID_KEJADIAN_LAINCARI_KK, wxID_KEJADIAN_LAINDOKUMEN,
wxID_KEJADIAN_LAININPUT_ALAMAT, wxID_KEJADIAN_LAININPUT_AYAH,
wxID_KEJADIAN_LAININPUT_DUSUN, wxID_KEJADIAN_LAININPUT_IBU,
wxID_KEJADIAN_LAININPUT_NAMA, wxID_KEJADIAN_LAININPUT_NIK,
wxID_KEJADIAN_LAININPUT_NO, wxID_KEJADIAN_LAININPUT_NO_KK,
wxID_KEJADIAN_LAININPUT_RT, wxID_KEJADIAN_LAININPUT_RW,
wxID_KEJADIAN_LAININPUT_TEMPAT_LAHIR, wxID_KEJADIAN_LAINISIPENDUDUK,
wxID_KEJADIAN_LAINKEMBALI, wxID_KEJADIAN_LAINKETERANGAN,
wxID_KEJADIAN_LAINLABEL_AGAMA, wxID_KEJADIAN_LAINLABEL_ALAMAT,
wxID_KEJADIAN_LAINLABEL_DATA_PENDUDUK, wxID_KEJADIAN_LAINLABEL_DIFABELITAS,
wxID_KEJADIAN_LAINLABEL_DUSUN, wxID_KEJADIAN_LAINLABEL_GOLONGAN_DARAH,
wxID_KEJADIAN_LAINLABEL_JENIS_KELAMIN,
wxID_KEJADIAN_LAINLABEL_KEWARGANEGARAAN, wxID_KEJADIAN_LAINLABEL_KONTRASEPSI,
wxID_KEJADIAN_LAINLABEL_NAMA_AYAH, wxID_KEJADIAN_LAINLABEL_NAMA_IBU,
wxID_KEJADIAN_LAINLABEL_NAMA_LENGKAP, wxID_KEJADIAN_LAINLABEL_NOMOR_KK,
wxID_KEJADIAN_LAINLABEL_PEKERJAAN, wxID_KEJADIAN_LAINLABEL_PEKERJAAN_LAINNYA,
wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TEMPUH,
wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TERAKHIR,
wxID_KEJADIAN_LAINLABEL_RESIKO_KEHAMILAN, wxID_KEJADIAN_LAINLABEL_SHDK,
wxID_KEJADIAN_LAINLABEL_STATUS_KEPENDUDUKAN,
wxID_KEJADIAN_LAINLABEL_STATUS_PERKAWINAN,
wxID_KEJADIAN_LAINLABEL_STATUS_TINGGAL,
wxID_KEJADIAN_LAINLABEL_TANGGAL_LAHIR, wxID_KEJADIAN_LAINLABEL_TEMPAT_LAHIR,
wxID_KEJADIAN_LAINLAPORAN, wxID_KEJADIAN_LAINLEBEL_NIK,
wxID_KEJADIAN_LAINNAMA_KK, wxID_KEJADIAN_LAINPILIHAN_AGAMA,
wxID_KEJADIAN_LAINPILIHAN_DIFABELITAS,
wxID_KEJADIAN_LAINPILIHAN_GOLONGAN_DARAH,
wxID_KEJADIAN_LAINPILIHAN_JENIS_KELAMIN, wxID_KEJADIAN_LAINPILIHAN_KEHAMILAN,
wxID_KEJADIAN_LAINPILIHAN_KONTRASEPSI, wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN,
wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN_LAINNYA,
wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_DITEMPUH,
wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_TERAKHIR,
wxID_KEJADIAN_LAINPILIHAN_SHDK, wxID_KEJADIAN_LAINPILIHAN_STATUS,
wxID_KEJADIAN_LAINPILIHAN_STATUS_KEPENDUDUKAN,
wxID_KEJADIAN_LAINPILIHAN_STATUS_TINGGAL,
wxID_KEJADIAN_LAINPILIHAN_WARGANEGARA, wxID_KEJADIAN_LAINSIMPANGAMBAR,
wxID_KEJADIAN_LAINSTATICTEXT1, wxID_KEJADIAN_LAINSTATICTEXT2,
wxID_KEJADIAN_LAINSTATICTEXT3, wxID_KEJADIAN_LAINSTATICTEXT4,
wxID_KEJADIAN_LAINSTATICTEXT5, wxID_KEJADIAN_LAINSTATICTEXT6,
wxID_KEJADIAN_LAINSTATICTEXT7, wxID_KEJADIAN_LAINTANGGALKEJADIAN,
wxID_KEJADIAN_LAINTANGGAL_LAHIR, wxID_KEJADIAN_LAINTGLKEJADIAN,
wxID_KEJADIAN_LAINTOMBOL_CARI, wxID_KEJADIAN_LAINTOMBOL_TAMBAH_DATA,
] = [wx.NewId() for _init_ctrls in range(72)]
class kejadian_lain(wx.Dialog):
def _init_coll_isipenduduk_Columns(self, parent):
# generated method, don't edit
parent.InsertColumn(col=0, format=wx.LIST_FORMAT_LEFT,
heading='Nama Penduduk', width=150)
parent.InsertColumn(col=1, format=wx.LIST_FORMAT_LEFT,
heading='Nomor KK', width=250)
parent.InsertColumn(col=2, format=wx.LIST_FORMAT_LEFT, heading='Alamat',
width=260)
parent.InsertColumn(col=3, format=wx.LIST_FORMAT_LEFT, heading='Dusun',
width=100)
parent.InsertColumn(col=4, format=wx.LIST_FORMAT_LEFT, heading='RT',
width=40)
parent.InsertColumn(col=5, format=wx.LIST_FORMAT_LEFT, heading='RW',
width=40)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Dialog.__init__(self, id=wxID_KEJADIAN_LAIN,
name=u'edit_kejadian_lain', parent=prnt, pos=wx.Point(406, 79),
size=wx.Size(888, 639), style=wx.FRAME_NO_TASKBAR,
title=u'Kejadian Lain')
self.SetClientSize(wx.Size(888, 639))
self.Center(wx.BOTH)
self.label_nomor_kk = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NOMOR_KK,
label=u'Nomor KK', name=u'label_nomor_kk', parent=self,
pos=wx.Point(8, 152), size=wx.Size(168, 17),
style=wx.TE_READONLY)
self.label_alamat = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_ALAMAT,
label=u'Alamat', name=u'label_alamat', parent=self,
pos=wx.Point(256, 152), size=wx.Size(47, 17), style=0)
self.label_dusun = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_DUSUN,
label=u'Dusun', name=u'label_dusun', parent=self,
pos=wx.Point(552, 152), size=wx.Size(144, 17), style=0)
self.lebel_nik = wx.StaticText(id=wxID_KEJADIAN_LAINLEBEL_NIK,
label=u'N I K *', name=u'lebel_nik', parent=self,
pos=wx.Point(192, 192), size=wx.Size(40, 17), style=0)
self.label_tempat_lahir = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_TEMPAT_LAHIR,
label=u'Tempat Lahir', name=u'label_tempat_lahir', parent=self,
pos=wx.Point(192, 312), size=wx.Size(176, 17), style=0)
self.label_tanggal_lahir = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_TANGGAL_LAHIR,
label=u'Tanggal Lahir', name=u'label_tanggal_lahir', parent=self,
pos=wx.Point(192, 352), size=wx.Size(152, 17), style=0)
self.label_golongan_darah = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_GOLONGAN_DARAH,
label=u'Golongan Darah', name=u'label_golongan_darah',
parent=self, pos=wx.Point(192, 392), size=wx.Size(200, 17),
style=0)
self.label_nama_lengkap = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NAMA_LENGKAP,
label=u'Nama Lengkap', name=u'label_nama_lengkap', parent=self,
pos=wx.Point(192, 232), size=wx.Size(98, 17), style=0)
self.label_jenis_kelamin = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_JENIS_KELAMIN,
label=u'Jenis Kelamin', name=u'label_jenis_kelamin', parent=self,
pos=wx.Point(192, 272), size=wx.Size(152, 17), style=0)
self.label_agama = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_AGAMA,
label=u'Agama', name=u'label_agama', parent=self,
pos=wx.Point(400, 192), size=wx.Size(120, 17), style=0)
self.input_no_kk = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NO_KK,
name=u'input_no_kk', parent=self, pos=wx.Point(8, 168),
size=wx.Size(240, 25), style=wx.TE_READONLY, value=u'')
self.input_alamat = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_ALAMAT,
name=u'input_alamat', parent=self, pos=wx.Point(256, 168),
size=wx.Size(288, 25), style=wx.TE_READONLY, value=u'')
self.input_dusun = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_DUSUN,
name=u'input_dusun', parent=self, pos=wx.Point(552, 168),
size=wx.Size(192, 25), style=wx.TE_READONLY, value=u'')
self.input_rt = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_RT,
name=u'input_rt', parent=self, pos=wx.Point(752, 168),
size=wx.Size(56, 27), style=wx.TE_READONLY, value=u'')
self.input_rw = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_RW,
name=u'input_rw', parent=self, pos=wx.Point(816, 168),
size=wx.Size(56, 27), style=wx.TE_READONLY, value=u'')
self.input_nik = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NIK,
name=u'input_nik', parent=self, pos=wx.Point(192, 208),
size=wx.Size(200, 25), style=wx.TE_READONLY, value=u'')
self.input_nama = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NAMA,
name=u'input_nama', parent=self, pos=wx.Point(192, 248),
size=wx.Size(200, 25), style=wx.TE_READONLY, value=u'')
self.pilihan_jenis_kelamin = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_JENIS_KELAMIN,
name=u'pilihan_jenis_kelamin', parent=self, pos=wx.Point(192,
288), size=wx.Size(200, 27), style=wx.TE_READONLY, value=u'')
self.input_tempat_lahir = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_TEMPAT_LAHIR,
name=u'input_tempat_lahir', parent=self, pos=wx.Point(192, 328),
size=wx.Size(200, 25), style=wx.TE_READONLY, value=u'')
self.tanggalkejadian = wx.TextCtrl(id=wxID_KEJADIAN_LAINTANGGALKEJADIAN,
name=u'tanggalkejadian', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(176, 24), style=wx.TE_READONLY, value=u'')
self.pilihan_golongan_darah = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_GOLONGAN_DARAH,
name=u'pilihan_golongan_darah', parent=self, pos=wx.Point(192,
408), size=wx.Size(80, 25), style=wx.TE_READONLY, value=u'')
self.pilihan_agama = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_AGAMA,
name=u'pilihan_agama', parent=self, pos=wx.Point(400, 208),
size=wx.Size(216, 25), style=wx.TE_READONLY, value=u'')
self.label_kewarganegaraan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_KEWARGANEGARAAN,
label=u'Kewarganegaraan', name=u'label_kewarganegaraan',
parent=self, pos=wx.Point(400, 232), size=wx.Size(168, 17),
style=0)
self.pilihan_warganegara = wx.TextCtrl(name=u'pilihan_warganegara',
parent=self, pos=wx.Point(400, 248), size=wx.Size(216, 25),
style=wx.TE_READONLY, value=u'')
self.label_pendidikan_terakhir = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TERAKHIR,
label=u'Pendidikan Terakhir', name=u'label_pendidikan_terakhir',
parent=self, pos=wx.Point(400, 272), size=wx.Size(184, 17),
style=0)
self.pilihan_pendidikan_terakhir = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_TERAKHIR,
name=u'pilihan_pendidikan_terakhir', parent=self,
pos=wx.Point(400, 288), size=wx.Size(216, 25),
style=wx.TE_READONLY, value=u'')
self.label_pendidikan_tempuh = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TEMPUH,
label=u'Pendidikan Saat Ini Ditempuh',
name=u'label_pendidikan_tempuh', parent=self, pos=wx.Point(400,
312), size=wx.Size(264, 17), style=0)
self.pilihan_pendidikan_ditempuh = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_DITEMPUH,
name=u'pilihan_pendidikan_ditempuh', parent=self,
pos=wx.Point(400, 328), size=wx.Size(216, 25),
style=wx.TE_READONLY, value=u'')
self.label_pekerjaan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PEKERJAAN,
label=u'Pekerjaan Utama', name=u'label_pekerjaan', parent=self,
pos=wx.Point(400, 352), size=wx.Size(200, 17), style=0)
self.pilihan_pekerjaan = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN,
name=u'pilihan_pekerjaan', parent=self, pos=wx.Point(400, 370),
size=wx.Size(216, 25), style=wx.TE_READONLY, value=u'')
self.label_pekerjaan_lainnya = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PEKERJAAN_LAINNYA,
label=u'Pekerjaan Lainnya', name=u'label_pekerjaan_lainnya',
parent=self, pos=wx.Point(400, 392), size=wx.Size(168, 17),
style=0)
self.pilihan_pekerjaan_lainnya = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN_LAINNYA,
name=u'pilihan_pekerjaan_lainnya', parent=self, pos=wx.Point(400,
408), size=wx.Size(216, 25), style=wx.TE_READONLY, value=u'')
self.label_status_perkawinan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_STATUS_PERKAWINAN,
label=u'Status Perkawinan', name=u'label_status_perkawinan',
parent=self, pos=wx.Point(624, 192), size=wx.Size(176, 17),
style=0)
self.pilihan_status = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_STATUS,
name=u'pilihan_status', parent=self, pos=wx.Point(624, 208),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.label_status_kependudukan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_STATUS_KEPENDUDUKAN,
label=u'Status Kependudukan', name=u'label_status_kependudukan',
parent=self, pos=wx.Point(624, 232), size=wx.Size(184, 17),
style=0)
self.pilihan_status_kependudukan = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_STATUS_KEPENDUDUKAN,
name=u'pilihan_status_kependudukan', parent=self,
pos=wx.Point(624, 248), size=wx.Size(248, 25),
style=wx.TE_READONLY, value=u'')
self.label_status_tinggal = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_STATUS_TINGGAL,
label=u'Status Tinggal', name=u'label_status_tinggal',
parent=self, pos=wx.Point(624, 272), size=wx.Size(152, 17),
style=0)
self.pilihan_status_tinggal = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_STATUS_TINGGAL,
name=u'pilihan_status_tinggal', parent=self, pos=wx.Point(624,
288), size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.label_difabelitas = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_DIFABELITAS,
label=u'Penyandang Difabelitas', name=u'label_difabelitas',
parent=self, pos=wx.Point(624, 312), size=wx.Size(184, 17),
style=0)
self.pilihan_difabelitas = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_DIFABELITAS,
name=u'pilihan_difabelitas', parent=self, pos=wx.Point(624, 328),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.label_kontrasepsi = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_KONTRASEPSI,
label=u'Penggunaan Kontrasepsi', name=u'label_kontrasepsi',
parent=self, pos=wx.Point(624, 352), size=wx.Size(192, 17),
style=0)
self.pilihan_kontrasepsi = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_KONTRASEPSI,
name=u'pilihan_kontrasepsi', parent=self, pos=wx.Point(624, 368),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.pilihan_kehamilan = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_KEHAMILAN,
name=u'pilihan_kehamilan', parent=self, pos=wx.Point(624, 408),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.laporan = wx.TextCtrl(id=wxID_KEJADIAN_LAINLAPORAN,
name=u'laporan', parent=self, pos=wx.Point(136, 496),
size=wx.Size(192, 27), style=0, value=u'')
self.keterangan = wx.TextCtrl(id=wxID_KEJADIAN_LAINKETERANGAN,
name=u'keterangan', parent=self, pos=wx.Point(416, 496),
size=wx.Size(448, 27), style=0, value=u'')
self.label_shdk = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_SHDK,
label=u'Status Hubungan Dalam Keluarga', name=u'label_shdk',
parent=self, pos=wx.Point(24, 536), size=wx.Size(320, 17),
style=0)
self.pilihan_shdk = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_SHDK,
name=u'pilihan_shdk', parent=self, pos=wx.Point(24, 560),
size=wx.Size(304, 25), style=wx.TE_READONLY, value=u'')
self.label_nama_ayah = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NAMA_AYAH,
label=u'Nama Ayah', name=u'label_nama_ayah', parent=self,
pos=wx.Point(344, 536), size=wx.Size(152, 17), style=0)
self.input_ayah = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_AYAH,
name=u'input_ayah', parent=self, pos=wx.Point(344, 560),
size=wx.Size(280, 25), style=wx.TE_READONLY, value=u'')
self.label_nama_ibu = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NAMA_IBU,
label=u'Nama Ibu', name=u'label_nama_ibu', parent=self,
pos=wx.Point(632, 536), size=wx.Size(160, 17), style=0)
self.input_ibu = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_IBU,
name=u'input_ibu', parent=self, pos=wx.Point(632, 560),
size=wx.Size(240, 25), style=wx.TE_READONLY, value=u'')
self.label_resiko_kehamilan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_RESIKO_KEHAMILAN,
label=u'Resiko Kehamilan', name=u'label_resiko_kehamilan',
parent=self, pos=wx.Point(624, 392), size=wx.Size(176, 17),
style=0)
self.tombol_tambah_data = wx.Button(id=wxID_KEJADIAN_LAINTOMBOL_TAMBAH_DATA,
label=u'Tambah Data', name=u'tombol_tambah_data', parent=self,
pos=wx.Point(240, 600), size=wx.Size(200, 32), style=0)
self.tombol_tambah_data.Bind(wx.EVT_BUTTON,
self.OnTombol_tambah_dataButton,
id=wxID_KEJADIAN_LAINTOMBOL_TAMBAH_DATA)
self.kembali = wx.Button(id=wxID_KEJADIAN_LAINKEMBALI,
label=u'Kembali Ke Menu', name=u'kembali', parent=self,
pos=wx.Point(456, 600), size=wx.Size(208, 32), style=0)
self.kembali.Bind(wx.EVT_BUTTON, self.OnKembaliButton,
id=wxID_KEJADIAN_LAINKEMBALI)
self.dokumen = wx.StaticText(id=wxID_KEJADIAN_LAINDOKUMEN,
label=u'Catatan Kejadian Penduduk Lainnya', name=u'dokumen',
parent=self, pos=wx.Point(16, 440), size=wx.Size(304, 17),
style=0)
self.label_data_penduduk = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_DATA_PENDUDUK,
label=u'FORM DATA PENDUDUK', name=u'label_data_penduduk',
parent=self, pos=wx.Point(336, 0), size=wx.Size(216, 17),
style=0)
self.isipenduduk = wx.ListCtrl(id=wxID_KEJADIAN_LAINISIPENDUDUK,
name=u'isipenduduk', parent=self, pos=wx.Point(16, 16),
size=wx.Size(856, 104), style=wx.LC_REPORT)
self._init_coll_isipenduduk_Columns(self.isipenduduk)
self.isipenduduk.Bind(wx.EVT_LIST_ITEM_SELECTED,
self.OnIsipendudukListItemSelected,
id=wxID_KEJADIAN_LAINISIPENDUDUK)
self.staticText1 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT1,
label=u'Nama Lengkap', name='staticText1', parent=self,
pos=wx.Point(400, 128), size=wx.Size(145, 17), style=0)
self.cari_kk = wx.TextCtrl(id=wxID_KEJADIAN_LAINCARI_KK,
name=u'cari_kk', parent=self, pos=wx.Point(552, 128),
size=wx.Size(224, 24), style=0, value='')
self.tombol_cari = wx.Button(id=wxID_KEJADIAN_LAINTOMBOL_CARI,
label=u'Cari', name=u'tombol_cari', parent=self, pos=wx.Point(784,
128), size=wx.Size(85, 24), style=0)
self.tombol_cari.Bind(wx.EVT_BUTTON, self.OnTombol_cariButton,
id=wxID_KEJADIAN_LAINTOMBOL_CARI)
self.input_no = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NO,
name=u'input_no', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(56, 27), style=wx.TE_READONLY, value=u'')
self.staticText2 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT2,
label=u'RT', name='staticText2', parent=self, pos=wx.Point(760,
152), size=wx.Size(24, 16), style=0)
self.staticText3 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT3,
label=u'RW', name='staticText3', parent=self, pos=wx.Point(824,
152), size=wx.Size(19, 17), style=0)
self.staticText4 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT4,
label=u'Pemberi Laporan', name='staticText4', parent=self,
pos=wx.Point(16, 504), size=wx.Size(118, 17), style=0)
self.staticText5 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT5,
label=u'Keterangan', name='staticText5', parent=self,
pos=wx.Point(336, 504), size=wx.Size(74, 17), style=0)
self.staticText6 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT6,
label=u'Tanggal Kejadian', name='staticText6', parent=self,
pos=wx.Point(16, 464), size=wx.Size(106, 17), style=0)
self.tglkejadian = wx.DatePickerCtrl(id=wxID_KEJADIAN_LAINTGLKEJADIAN,
name='tglkejadian', parent=self, pos=wx.Point(136, 464),
size=wx.Size(192, 26), style=wx.DP_DROPDOWN|wx.DP_SHOWCENTURY)
self.tglkejadian.Bind(wx.EVT_DATE_CHANGED, self.OnGetDate)
self.nama_kk = wx.TextCtrl(id=wxID_KEJADIAN_LAINNAMA_KK,
name=u'nama_kk', parent=self, pos=wx.Point(8, 208),
size=wx.Size(176, 24), style=wx.TE_READONLY, value=u'')
self.staticText7 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT7,
label=u'Nama Kepala Keluarga', name='staticText7', parent=self,
pos=wx.Point(8, 192), size=wx.Size(135, 17), style=0)
self.tanggalkejadian = wx.TextCtrl(id=wxID_KEJADIAN_LAINTANGGALKEJADIAN,
name=u'tanggalkejadian', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(176, 24), style=wx.TE_READONLY, value=u'')
self.simpangambar = wx.TextCtrl(id=wxID_KEJADIAN_LAINSIMPANGAMBAR,
name=u'simpangambar', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(152, 24), style=0, value=u'')
self.tanggal_lahir = wx.TextCtrl(id=wxID_KEJADIAN_LAINTANGGAL_LAHIR,
name=u'tanggal_lahir', parent=self, pos=wx.Point(192, 368),
size=wx.Size(200, 27), style=0, value=u'')
def __init__(self, parent):
self._init_ctrls(parent)
self.awal()
def awal(self):
self.loadgambar()
self.IsiList()
self.input_no_kk.SetValue('')
self.input_alamat.SetValue('')
self.input_dusun.SetValue('')
self.input_rt.SetValue('')
self.input_rw.SetValue('')
self.input_nik.SetValue('')
self.input_nama.SetValue('')
self.pilihan_jenis_kelamin.SetValue('')
self.input_tempat_lahir.SetValue('')
self.tanggal_lahir.SetValue('')
self.pilihan_golongan_darah.SetValue('')
self.pilihan_agama.SetValue('')
self.pilihan_warganegara.SetValue('')
self.pilihan_pendidikan_terakhir.SetValue('')
self.pilihan_pendidikan_ditempuh.SetValue('')
self.pilihan_pekerjaan.SetValue('')
self.pilihan_pekerjaan_lainnya.SetValue('')
self.pilihan_status.SetValue('')
self.pilihan_status_kependudukan.SetValue('')
self.pilihan_status_tinggal.SetValue('')
self.pilihan_difabelitas.SetValue('')
self.pilihan_kontrasepsi.SetValue('')
self.pilihan_kehamilan.SetValue('')
self.pilihan_shdk.SetValue('')
self.input_ayah.SetValue('')
self.input_ibu.SetValue('')
self.laporan.SetValue('')
self.keterangan.SetValue('')
self.cari_kk.SetValue('')
self.nama_kk.SetValue('')
self.input_no.SetValue('')
def OnGetDate(self, event):
selected = self.tglkejadian.GetValue()
month = selected.Month + 1
day = selected.Day
year = selected.Year
date_str = "%02d/%02d/%4d" % (month, day, year)
self.tanggalkejadian.SetValue("{}".format(date_str))
def loadgambar(self):
self.PhotoMaxSize = 130
img = wx.EmptyImage(120,130)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY, wx.BitmapFromImage(img),wx.Point(52, 251))
def IsiList(self):
self.isipenduduk.DeleteAllItems()
sql = "SELECT * FROM penduduk WHERE kematian='Tidak'"
cur.execute(sql)
hasil = cur.fetchall()
nokk = self.isipenduduk.GetItemCount()
for i in hasil :
self.isipenduduk.InsertStringItem(nokk, "%s"%i[1])
self.isipenduduk.SetStringItem(nokk,1,"%s"%i[2])
self.isipenduduk.SetStringItem(nokk,2,"%s"%i[21])
self.isipenduduk.SetStringItem(nokk,3,"%s"%i[29])
self.isipenduduk.SetStringItem(nokk,4,"%s"%i[26])
self.isipenduduk.SetStringItem(nokk,5,"%s"%i[27])
nokk = nokk + 1
def Isi_Object(self) :
carikk=str(self.cari_kk.GetValue())
sql="SELECT * FROM penduduk WHERE nik='%s'"%(carikk)
cur.execute(sql)
hasil = cur.fetchone()
if hasil :
self.input_no_kk.SetValue(str(hasil[16]))
self.nama_kk.SetValue(str(hasil[17]))
self.input_alamat.SetValue(str(hasil[21]))
self.input_dusun.SetValue(str(hasil[29]))
self.input_rt.SetValue(str(hasil[26]))
self.input_rw.SetValue(str(hasil[27]))
self.input_nik.SetValue(str(hasil[1]))
self.input_nama.SetValue(str(hasil[2]))
self.pilihan_jenis_kelamin.SetValue(str(hasil[3]))
self.input_tempat_lahir.SetValue(str(hasil[4]))
self.tanggal_lahir.SetValue(str(hasil[5]))
self.pilihan_golongan_darah.SetValue(str(hasil[7]))
self.pilihan_agama.SetValue(str(hasil[8]))
self.pilihan_warganegara.SetValue(str(hasil[28]))
self.pilihan_pendidikan_terakhir.SetValue(str(hasil[12]))
self.pilihan_pendidikan_ditempuh.SetValue(str(hasil[31]))
self.pilihan_pekerjaan.SetValue(str(hasil[13]))
self.pilihan_pekerjaan_lainnya.SetValue(str(hasil[19]))
self.pilihan_status.SetValue(str(hasil[9]))
self.pilihan_status_kependudukan.SetValue(str(hasil[32]))
self.pilihan_status_tinggal.SetValue(str(hasil[33]))
self.pilihan_difabelitas.SetValue(str(hasil[34]))
self.pilihan_kontrasepsi.SetValue(str(hasil[35]))
self.pilihan_kehamilan.SetValue(str(hasil[36]))
self.pilihan_shdk.SetValue(str(hasil[10]))
self.input_ayah.SetValue(str(hasil[15]))
self.input_ibu.SetValue(str(hasil[14]))
self.simpangambar.SetValue(str(hasil[57]))
self.input_no.SetValue(str(hasil[0]))
self.viewgambar()
else :
self.pesan = wx.MessageDialog(self,"Data Tidak Ada","Konfirmasi",wx.OK)
self.pesan.ShowModal()
self.cari_kk.Clear()
self.cari_kk.SetFocus()
def viewgambar(self):
filepath=self.simpangambar.GetValue()
img = wx.Image(filepath, wx.BITMAP_TYPE_ANY)
# scale the image, preserving the aspect ratio
W = img.GetWidth()
H = img.GetHeight()
if W > H:
NewW = self.PhotoMaxSize
NewH = self.PhotoMaxSize * H / W
else:
NewH = self.PhotoMaxSize
NewW = self.PhotoMaxSize * W / H
img = img.Scale(NewW,NewH)
self.imageCtrl.SetBitmap(wx.BitmapFromImage(img))
def OnTombol_kembali_kemenuButton(self, event):
self.main=data_penduduk.create(None)
self.main.Show()
self.Close()
self.Destroy()
def OnTombol_tambah_dataButton(self, event):
nokk = str(self.input_no_kk.GetValue())
nik = str(self.input_nik.GetValue())
nama = str(self.input_nama.GetValue())
kejadian = str(self.tanggalkejadian.GetValue())
laporan = str(self.laporan.GetValue())
keterangan = str(self.keterangan.GetValue())
inputno = str(self.input_no.GetValue())
if laporan == '':
self.pesan = wx.MessageDialog(self,"Nama Pelapor Jangan Kosong","Peringatan",wx.OK)
self.pesan.ShowModal()
elif keterangan == '':
self.pesan = wx.MessageDialog(self,"Keterangan Kematian Jangan Kosong","Peringatan",wx.OK)
self.pesan.ShowModal()
else:
add_keluarga="UPDATE penduduk SET kejadianlain='Ya' WHERE no='"+inputno+"'"
cur.execute(add_keluarga)
db.commit()
add_kejadian="INSERT INTO peristiwalain (nomornik,tanggalperistiwa, peristiwa, pemberilaporan, namalengkap, nomorkk) VALUES('"+(nik)+"','"+(kejadian)+"','"+(keterangan)+"','"+(laporan)+"','"+(nama)+"','"+(nokk)+"') "
cur.execute(add_kejadian)
db.commit()
self.pesan = wx.MessageDialog(self,"Data Sudah Tersimpan","Konfirmasi",wx.OK)
self.pesan.ShowModal()
self.awal()
def OnKembaliButton(self, event):
self.main=data_penduduk.create(None)
self.main.Show()
self.Close()
self.Destroy()
def OnTombol_cariButton(self, event):
self.Isi_Object()
def OnIsipendudukListItemSelected(self, event):
self.currentItem = event.m_itemIndex # get the index of the selected row
b=self.isipenduduk.GetItem(self.currentItem).GetText() # convert the selected row index to its text value
self.cari_kk.SetValue(b)
self.Isi_Object()
event.Skip()
|
gpl-2.0
| 5,543,628,404,911,905,000
| 48.270085
| 229
| 0.630608
| false
| 2.721205
| false
| false
| false
|
hjanime/VisTrails
|
vistrails/packages/URL/http_directory.py
|
1
|
8252
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# https://gist.github.com/remram44/6540454
from __future__ import division
from HTMLParser import HTMLParser
import os
import re
from .https_if_available import build_opener
re_url = re.compile(r'^(([a-zA-Z_-]+)://([^/]+))(/.*)?$')
def resolve_link(link, url):
m = re_url.match(link)
if m is not None:
if not m.group(4):
# http://domain -> http://domain/
return link + '/'
else:
return link
elif link[0] == '/':
# /some/path
murl = re_url.match(url)
return murl.group(1) + link
else:
# relative/path
if url[-1] == '/':
return url + link
else:
return url + '/' + link
class ListingParser(HTMLParser):
"""Parses an HTML file and build a list of links.
Links are stored into the 'links' set. They are resolved into absolute
links.
"""
def __init__(self, url):
HTMLParser.__init__(self)
if url[-1] != '/':
url += '/'
self.__url = url
self.links = set()
def handle_starttag(self, tag, attrs):
if tag == 'a':
for key, value in attrs:
if key == 'href':
if not value:
continue
value = resolve_link(value, self.__url)
self.links.add(value)
break
def download_directory(url, target, insecure=False):
def mkdir():
if not mkdir.done:
try:
os.mkdir(target)
except OSError:
pass
mkdir.done = True
mkdir.done = False
opener = build_opener(insecure=insecure)
response = opener.open(url)
if response.info().type == 'text/html':
contents = response.read()
parser = ListingParser(url)
parser.feed(contents)
for link in parser.links:
link = resolve_link(link, url)
if link[-1] == '/':
link = link[:-1]
if not link.startswith(url):
continue
name = link.rsplit('/', 1)[1]
if '?' in name:
continue
mkdir()
download_directory(link, os.path.join(target, name), insecure)
if not mkdir.done:
# We didn't find anything to write inside this directory
# Maybe it's an HTML file?
if url[-1] != '/':
end = target[-5:].lower()
if not (end.endswith('.htm') or end.endswith('.html')):
target = target + '.html'
with open(target, 'wb') as fp:
fp.write(contents)
else:
buffer_size = 4096
with open(target, 'wb') as fp:
chunk = response.read(buffer_size)
while chunk:
fp.write(chunk)
chunk = response.read(buffer_size)
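# Illustrative usage sketch, not part of the original module: mirror a remote
# listing into a local folder. The URL and target directory are assumptions
# chosen only to show the call shape; real callers pass their own values.
def _example_mirror():  # pragma: no cover
    # Follows only links that stay under the starting URL, as implemented above.
    download_directory('http://example.org/pub/dataset/', 'dataset_mirror',
                       insecure=False)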
###############################################################################
import unittest
class TestLinkResolution(unittest.TestCase):
def test_absolute_link(self):
self.assertEqual(
resolve_link('http://website.org/p/test.txt',
'http://some/other/url'),
'http://website.org/p/test.txt')
self.assertEqual(
resolve_link('http://website.org',
'http://some/other/url'),
'http://website.org/')
def test_absolute_path(self):
self.assertEqual(
resolve_link('/p/test.txt', 'http://some/url'),
'http://some/p/test.txt')
self.assertEqual(
resolve_link('/p/test.txt', 'http://some/url/'),
'http://some/p/test.txt')
self.assertEqual(
resolve_link('/p/test.txt', 'http://site'),
'http://site/p/test.txt')
self.assertEqual(
resolve_link('/p/test.txt', 'http://site/'),
'http://site/p/test.txt')
def test_relative_path(self):
self.assertEqual(
resolve_link('some/file', 'http://site/folder'),
'http://site/folder/some/file')
self.assertEqual(
resolve_link('some/file', 'http://site/folder/'),
'http://site/folder/some/file')
self.assertEqual(
resolve_link('some/dir/', 'http://site/folder'),
'http://site/folder/some/dir/')
class TestParser(unittest.TestCase):
def test_parse(self):
parser = ListingParser('http://a.remram.fr/test')
parser.feed("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"><html><head><title>
Index of /test</title></head><body><h1>Index of /test</h1><table><tr><th>
<img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a>
</th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size
</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5">
<hr></th></tr><tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td>
<td><a href="/">Parent Directory</a></td><td> </td><td align="right"> -
</td><td> </td></tr><tr><td valign="top">
<img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="a">a</a></td>
<td align="right">11-Sep-2013 15:46 </td><td align="right"> 3 </td><td>
</td></tr><tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td>
<td><a href="/bb">bb</a></td><td align="right">11-Sep-2013 15:46 </td>
<td align="right"> 3 </td><td> </td></tr><tr><td valign="top">
<img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="/cc/">cc/</a></td>
<td align="right">11-Sep-2013 15:46 </td><td align="right"> - </td><td>
</td></tr><tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td>
<td><a href="http://a.remram.fr/dd">dd/</a></td><td align="right">
11-Sep-2013 15:46 </td><td align="right"> - </td><td> </td></tr><tr>
<th colspan="5"><hr></th></tr></table></body></html>
""")
links = set(l for l in parser.links if '?' not in l)
self.assertEqual(links, set([
'http://a.remram.fr/',
'http://a.remram.fr/test/a',
'http://a.remram.fr/bb',
'http://a.remram.fr/cc/',
'http://a.remram.fr/dd',
]))
|
bsd-3-clause
| -6,873,373,719,552,639,000
| 37.381395
| 79
| 0.544716
| false
| 3.795768
| true
| false
| false
|
Bajoo/client-pc
|
bajoo/gui/base_view.py
|
1
|
4844
|
# -*- coding: utf-8 -*-
import wx
from ..common.path import resource_filename
from .translator import Translator
from ..common.i18n import N_
class BaseView(Translator):
"""Base class for all views.
This class comes with helper functions to configure the view.
Class Attributes:
LIGHT_GRAY (wx.Colour): Predefined background color.
Attributes:
window (wx.Window): the window element the view is in charge.
"""
LIGHT_GRAY = wx.Colour(0xf2, 0xf2, 0xf2)
def __init__(self, window):
Translator.__init__(self)
# wx.Window instance.
self.window = window
def set_frame_title(self, title):
"""Set the title of the wx.Frame containing this Window.
Args:
title (str): new frame title. The title will be translated.
"""
frame = self.window.GetTopLevelParent()
self.register_i18n(frame, frame.SetTitle, title)
def make_sizer(self, direction, items, outside_border=True, flag=0,
proportion=0, sizer=None, border=15):
"""Recursively make sizers with border for simple cases.
Each element given will be added to the sizer, with appropriate
borders. Border between elements (even sub-sizer) will be merged.
Args:
direction: the direction of the first sizer. Can be wx.HORIZONTAL
or wx.VERTICAL.
items (list of wx.Window): a list of all elements to add to the
sizer. If an item is None, a stretchable spacer is added. If
it's another list, this function is called recursively with the
opposite direction.
outside_border (boolean, optional): If set to False, no outside
borders are added; only borders between elements will be
created.
flag (optional): if set, additional flags that will be passed to
each ``sizer.Add()`` call.
proportion (optional): If set, the parameter will be passed to each
``sizer.Add()`` call.
sizer (wx.Sizer, optional): If set, this empty sizer will be used,
instead of creating a new one.
border (integer, optional): size of the border to use
returns:
wx.Sizer: the top-level sizer created. (An illustrative usage sketch appears at the end of this module.)
"""
swap_direction = {
wx.VERTICAL: wx.HORIZONTAL,
wx.HORIZONTAL: wx.VERTICAL
}
if not sizer:
sizer = wx.BoxSizer(direction)
# the first border is implemented as a Spacer,
# because borders of hidden elements don't appear.
if outside_border:
sizer.AddSpacer(border)
for (index, item) in enumerate(items):
if item is None:
sizer.AddStretchSpacer()
continue
flags = 0
if isinstance(item, list):
item = self.make_sizer(swap_direction[direction], item,
outside_border=False)
if isinstance(item, wx.Sizer):
flags |= wx.EXPAND
# Compute flag for merging common border.
if outside_border:
if direction is wx.VERTICAL:
flags |= wx.LEFT | wx.RIGHT
else:
flags |= wx.TOP | wx.BOTTOM
if len(items) - 1 is not index:
if direction is wx.VERTICAL:
flags |= wx.BOTTOM
else:
flags |= wx.RIGHT
flags |= flag
sizer.Add(item, border=border, flag=flags, proportion=proportion)
# last border
if outside_border:
sizer.AddSpacer(border)
return sizer
def create_settings_button_box(self, parent):
"""Create a common box with 3 buttons: ok, cancel, apply"""
btn_ok = wx.Button(parent, wx.ID_OK, name='btn_ok')
btn_cancel = wx.Button(parent, wx.ID_CANCEL, name='btn_cancel')
btn_apply = wx.Button(parent, wx.ID_APPLY, name='btn_apply')
self.register_many_i18n('SetLabel', {
btn_cancel: N_('Cancel'),
btn_ok: N_('OK'),
btn_apply: N_('Apply')
})
# Buttons box
button_box = wx.StdDialogButtonSizer()
button_box.SetAffirmativeButton(btn_ok)
button_box.SetCancelButton(btn_cancel)
button_box.AddButton(btn_apply)
# Layout the button box
button_box.Realize()
return button_box
def set_icon(self):
"""Set the standard Bajoo favicon to the window.
Note that the window must be an instance of wx.Frame.
"""
icon_path = resource_filename('assets/window_icon.png')
icon = wx.Icon(icon_path)
self.window.SetIcon(icon)
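# Illustrative sketch, not part of the original module: how make_sizer builds a
# nested layout. A None entry becomes a stretch spacer and a nested list becomes
# a sub-sizer in the opposite direction. The widgets are assumptions for the
# example only.
def _example_layout(window):  # pragma: no cover
    view = BaseView(window)
    label = wx.StaticText(window, label='Name')
    field = wx.TextCtrl(window)
    ok_btn = wx.Button(window, wx.ID_OK)
    sizer = view.make_sizer(wx.VERTICAL, [
        label,
        [field, None, ok_btn],  # horizontal row: field, stretch spacer, button
    ])
    window.SetSizer(sizer)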
|
gpl-3.0
| 7,601,464,904,003,091,000
| 32.638889
| 79
| 0.569777
| false
| 4.360036
| false
| false
| false
|
jim-easterbrook/pywws
|
src/pywws/device_pyusb1.py
|
1
|
4757
|
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-20 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Low level USB interface to weather station, using PyUSB v1.0.
Introduction
============
This module handles low level communication with the weather station
via the `PyUSB <http://sourceforge.net/apps/trac/pyusb/>`_ library
(version 1.0). It is one of several USB device modules, each of which
uses a different USB library interface. See :ref:`Installation - USB
library<dependencies-usb>` for details.
Testing
=======
Run :py:mod:`pywws.testweatherstation` with increased verbosity so it
reports which USB device access module is being used::
python -m pywws.testweatherstation -vv
18:28:09:pywws.weatherstation.CUSBDrive:using pywws.device_pyusb1
0000 55 aa ff ff ff ff ff ff ff ff ff ff ff ff ff ff 05 20 01 41 11 00 00 00 81 00 00 0f 05 00 e0 51
0020 03 27 ce 27 00 00 00 00 00 00 00 12 02 14 18 27 41 23 c8 00 00 00 46 2d 2c 01 64 80 c8 00 00 00
0040 64 00 64 80 a0 28 80 25 a0 28 80 25 03 36 00 05 6b 00 00 0a 00 f4 01 12 00 00 00 00 00 00 00 00
0060 00 00 49 0a 63 12 05 01 7f 00 36 01 60 80 36 01 60 80 bc 00 7b 80 95 28 12 26 6c 28 25 26 c8 01
0080 1d 02 d8 00 de 00 ff 00 ff 00 ff 00 00 11 10 06 01 29 12 02 01 19 32 11 09 09 05 18 12 01 22 13
00a0 14 11 11 04 15 04 11 12 17 05 12 11 09 02 15 26 12 02 11 07 05 11 09 02 15 26 12 02 11 07 05 11
00c0 09 10 09 12 12 02 02 12 38 12 02 07 19 00 11 12 16 03 27 12 02 03 11 00 11 12 16 03 27 11 12 26
00e0 21 32 11 12 26 21 32 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57
API
===
"""
__docformat__ = "restructuredtext en"
import sys
import usb.core
import usb.util
class USBDevice(object):
"""Low level USB device access via PyUSB 1.0 library.
:param idVendor: the USB "vendor ID" number, for example 0x1941.
:type idVendor: int
:param idProduct: the USB "product ID" number, for example 0x8021.
:type idProduct: int
"""
def __init__(self, idVendor, idProduct):
self.dev = usb.core.find(idVendor=idVendor, idProduct=idProduct)
if not self.dev:
raise IOError("Weather station device not found")
if sys.platform.startswith('linux'):
try:
detach = self.dev.is_kernel_driver_active(0)
except NotImplementedError:
detach = True
if detach:
try:
self.dev.detach_kernel_driver(0)
except usb.core.USBError:
pass
self.dev.reset()
self.dev.set_configuration()
usb.util.claim_interface(self.dev, 0)
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.dev.read(0x81, size, timeout=1200)
if not result or len(result) < size:
raise IOError('pywws.device_pyusb1.USBDevice.read_data failed')
return list(result)
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
bmRequestType = usb.util.build_request_type(
usb.util.ENDPOINT_OUT,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE
)
result = self.dev.ctrl_transfer(
bmRequestType=bmRequestType,
bRequest=usb.REQ_SET_CONFIGURATION,
data_or_wLength=buf,
wValue=0x200,
timeout=50)
if result != len(buf):
raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
return True
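# Illustrative usage sketch, not part of the original module. The vendor and
# product IDs are the example values from the class docstring; the 8-byte
# command below is an assumption, not a documented station request -- real
# traffic is generated by the higher-level pywws code.
def _example_probe():  # pragma: no cover
    dev = USBDevice(0x1941, 0x8021)
    dev.write_data([0xa1, 0x00, 0x00, 0x20, 0xa1, 0x00, 0x00, 0x20])
    block = dev.read_data(0x20)
    print('read %d bytes' % len(block))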
|
gpl-2.0
| 4,071,569,313,834,991,000
| 33.722628
| 104
| 0.646206
| false
| 3.639633
| false
| false
| false
|
nehresma/gocardless-python
|
test/test_params.py
|
1
|
8989
|
import datetime
import mock
import unittest
import six
from six.moves import urllib
import gocardless
from gocardless import utils, urlbuilder
class ExpiringLimitTestCase(object):
"""superclass factoring out tests for expiring limit param objects"""
def test_interval_length_is_positive(self):
pars = self.create_params(10, "1321230", 1, "day")
with self.assertRaises(ValueError):
pars = self.create_params(10, "1123210", -1, "day")
def test_interval_unit_is_valid(self):
for interval_unit in ["day", "week", "month"]:
pars = self.create_params(10, 10, "11235432", interval_unit)
with self.assertRaises(ValueError):
pars = self.create_params(10, 10, "1432233123", "invalid")
def _future_date_tester(self, argname):
invalid_date = datetime.datetime.now() - datetime.timedelta(100)
valid_date = datetime.datetime.now() + datetime.timedelta(2000)
par1 = self.create_params(10, 10, "23423421", "day", **{argname:valid_date})
with self.assertRaises(ValueError):
par1 = self.create_params(10, 10, "2342341", "day",
**{argname:invalid_date})
def test_expires_at_in_future(self):
self._future_date_tester("expires_at")
def test_interval_count_positive(self):
with self.assertRaises(ValueError):
self.create_params(10, 10, "merchid", "day", interval_count=-1)
class PreAuthParamsTestCase(ExpiringLimitTestCase, unittest.TestCase):
def default_args_construct(self, extra_options):
"""
For testing optional arguments, builds the param object with valid
required arguments and adds optional arguments as keywords from
`extra_options`
:param extra_options: Extra optional keyword arguments to pass to
the constructor.
"""
return urlbuilder.\
PreAuthorizationParams(12, "3456", 6, "month", **extra_options)
def create_params(self, *args, **kwargs):
return urlbuilder.PreAuthorizationParams(*args, **kwargs)
def test_max_amount_is_positive(self):
self.assertRaises(ValueError, \
urlbuilder.PreAuthorizationParams, -1, "1232532", 4, "month")
def test_interval_length_is_a_positive_integer(self):
self.assertRaises(ValueError, \
urlbuilder.PreAuthorizationParams, 12, "!2343", -3, "month")
def test_interval_unit_is_one_of_accepted(self):
for unit_type in ["month", "day", "week"]:
pa = urlbuilder.PreAuthorizationParams(12, "1234", 3, unit_type)
self.assertRaises(ValueError, \
urlbuilder.PreAuthorizationParams, 21,"1234", 4, "soem other unit")
def test_expires_at_is_later_than_now(self):
earlier = datetime.datetime.now() - datetime.timedelta(1)
self.assertRaises(ValueError, self.default_args_construct, \
{"expires_at":earlier})
def test_interval_count_is_postive_integer(self):
self.assertRaises(ValueError, self.default_args_construct, \
{"interval_count":-1})
class PreAuthParamsToDictTestCase(unittest.TestCase):
def setUp(self):
self.all_params = {
"max_amount":12,
"interval_unit":"day",
"interval_length":10,
"merchant_id":"1234435",
"name":"aname",
"description":"adesc",
"interval_count":123,
"currency":"GBP",
"expires_at":datetime.datetime.strptime("2020-01-01", "%Y-%m-%d"),
"calendar_intervals":True
}
self.required_keys = [
"max_amount", "interval_unit", "interval_length", "merchant_id"]
def create_from_params_dict(self, in_params):
params = in_params.copy()
pa = urlbuilder.PreAuthorizationParams(params.pop("max_amount"), \
params.pop("merchant_id"), \
params.pop("interval_length"), \
params.pop("interval_unit"),\
**params)
return pa
def assert_inverse(self, keys):
params = dict([[k,v] for k,v in six.iteritems(self.all_params) \
if k in keys])
pa = self.create_from_params_dict(params)
self.assertEqual(params, pa.to_dict())
def test_to_dict_all_params(self):
self.assert_inverse(list(self.all_params.keys()))
def test_to_dict_only_required(self):
self.assert_inverse(self.required_keys)
class BillParamsTestCase(unittest.TestCase):
def create_params(self, *args, **kwargs):
return urlbuilder.BillParams(*args, **kwargs)
def test_amount_is_positive(self):
params = self.create_params(10, "merchid")
with self.assertRaises(ValueError):
par2 = self.create_params(-1, "merchid")
def test_to_dict_required(self):
pars = self.create_params(10, "merchid")
res = pars.to_dict()
expected = {"amount":10, "merchant_id":"merchid"}
self.assertEqual(res, expected)
def test_to_dict_optional(self):
pars = self.create_params(10, "merchid", name="aname", description="adesc")
res = pars.to_dict()
expected = {"amount":10,
"name":"aname",
"description":"adesc",
"merchant_id":"merchid"
}
self.assertEqual(res, expected)
def test_resource_name_is_bills(self):
pars = urlbuilder.BillParams(10, "merchid")
self.assertEqual(pars.resource_name, "bills")
class SubscriptionParamsTestCase(ExpiringLimitTestCase, unittest.TestCase):
def create_params(self, *args, **kwargs):
return urlbuilder.SubscriptionParams(*args, **kwargs)
def test_setup_fee(self):
pars = self.create_params(10, "merchid", 10, "day", setup_fee=20)
expected = {
"merchant_id": "merchid",
"amount": 10,
"interval_length": 10,
"interval_unit" : "day",
"setup_fee": 20
}
self.assertEqual(expected, pars.to_dict())
def test_start_at_in_future(self):
valid_date = datetime.datetime.now() + datetime.timedelta(200)
invalid_date = datetime.datetime.now() - datetime.timedelta(100)
par1 = self.create_params(10,"merchid", 10, "day", start_at=valid_date)
with self.assertRaises(ValueError):
par2 = self.create_params(10, "merchid", 10, "day",
start_at=invalid_date)
def test_expires_at_after_start_at(self):
date1 = datetime.datetime.now() + datetime.timedelta(100)
date2 = datetime.datetime.now() + datetime.timedelta(200)
par1 = self.create_params(10, "merchid", 10, "day",
expires_at=date2, start_at=date1)
with self.assertRaises(ValueError):
par2 = self.create_params(10, "merchid", 10, "day",
expires_at=date1, start_at=date2)
def test_to_dict_only_required(self):
expected = {
"merchant_id":"merchid",
"amount":10,
"interval_length":10,
"interval_unit":"day"}
pars = self.create_params(10, "merchid", 10, "day")
self.assertEqual(expected, pars.to_dict())
def test_to_dict_all(self):
start_at = datetime.datetime.now() + datetime.timedelta(1000)
expires_at =datetime.datetime.now() + datetime.timedelta(2000)
expected = {
"merchant_id":"merchid",
"amount":10,
"interval_length":10,
"interval_unit":"day",
"interval_count":5,
"start_at":start_at.isoformat()[:-7] + "Z",
"expires_at":expires_at.isoformat()[:-7] + "Z",
"name":"aname",
"description":"adesc",
}
par = self.create_params(10, "merchid", 10, "day", start_at=start_at,
expires_at=expires_at, interval_count=5, name="aname",
description="adesc")
self.assertEqual(expected, par.to_dict())
class PrepopDataTestCase(unittest.TestCase):
def setUp(self):
self.mock_prepop = {"first_name": "Tom",
"last_name": "Blomfield",
"email": "tom@gocardless.com"
}
def assert_prepop(self, params):
self.assertEqual(params.to_dict()["user"], self.mock_prepop)
def test_bill_params(self):
params = urlbuilder.BillParams(10, "amerchid", user=self.mock_prepop)
self.assert_prepop(params)
def test_sub_params(self):
params = urlbuilder.SubscriptionParams(10, "merchid", 3, "day", user=self.mock_prepop)
self.assert_prepop(params)
def test_pre_auth_params(self):
params = urlbuilder.PreAuthorizationParams(10, "amerchid", 5, "day", user=self.mock_prepop)
self.assert_prepop(params)
|
mit
| -7,502,890,767,632,523,000
| 36.92827
| 99
| 0.590833
| false
| 3.73763
| true
| false
| false
|
genyang/classifip
|
classifip/representations/intervalsProbability.py
|
1
|
8808
|
import numpy as np
from credalset import CredalSet
class IntervalsProbability(CredalSet):
"""Class of probability intervals: probabilistic bounds on singletons
:param lproba: a 2xn array containing upper (1st row) and lower bounds
:type lproba: :class:`~numpy.array`
:param nbDecision: number of elements of the space
:type nbDecision: integer
>>> from numpy import array
>>> ip=array([[0.5, 0.5, 0.5], [0.1, 0.1, 0.1]])
>>> from classifip.representations.intervalsProbability import IntervalsProbability
>>> intprob=IntervalsProbability(ip)
>>> print(intprob)
y0 y1 y2
--------------------
upper bound | 0.500 0.500 0.500
lower bound | 0.100 0.100 0.100
>>> ip2=array([[0.4, 0.5, 0.6], [0., 0.1, 0.2]])
>>> intprob2=IntervalsProbability(ip2)
>>> print intprob & intprob2
y0 y1 y2
--------------------
upper bound | 0.400 0.500 0.500
lower bound | 0.100 0.100 0.200
>>> print intprob | intprob2
y0 y1 y2
--------------------
upper bound | 0.500 0.500 0.600
lower bound | 0.000 0.100 0.100
>>> print intprob + intprob2
y0 y1 y2
--------------------
upper bound | 0.450 0.500 0.550
lower bound | 0.050 0.100 0.150
>>> ip3=array([[0.7, 0.5, 0.2], [0.4, 0.2, 0.1]])
>>> intprob3=IntervalsProbability(ip3)
>>> intprob3.isreachable()
1
>>> intprob3.getmaximindecision()
0
>>> intprob3.getmaximaxdecision()
0
>>> intprob3.getintervaldomdecision()
array([ 1., 1., 0.])
>>> intprob3.getmaximaldecision()
array([ 1., 1., 0.])
"""
def __init__(self,lproba):
"""Instanciate probability interval bounds
:param lproba: a 2xn array containing upper (1st row) and lower bounds
:type lproba: :class:`~numpy.array`
"""
if lproba.__class__.__name__ != 'ndarray':
raise Exception('Expecting a numpy array as argument')
if lproba[:,1].size != 2:
raise Exception('Array should contain two rows: top for upper prob, bottom for lower prob')
if lproba.ndim != 2:
raise Exception('Bad dimension of array: should contain 2 dimensions')
self.lproba=lproba
self.nbDecision=lproba[0].size
if np.all(lproba[0] >=lproba[1]) != 1:
raise Exception('Some upper bounds lower than lower bounds')
def isproper(self):
"""Check if probability intervals induce a non-empty probability set.
:returns: 0 (empty/incur sure loss) or 1 (non-empty/avoid sure loss).
:rtype: integer
"""
if self.lproba[1,:].sum()<=1 and self.lproba[0,:].sum()>=1:
return 1
else:
return 0
def getlowerprobability(self,subset):
"""Compute lower probability of an event expressed in binary code.
:param subset: the event of interest (a 1xn vector containing 1 for elements
in the event, 0 otherwise.)
:param type: np.array
:returns: lower probability value
:rtype: float
"""
if subset.__class__.__name__!='ndarray':
raise Exception('Expecting a numpy array as argument')
if subset.size != self.nbDecision:
raise Exception('Subset incompatible with the frame size')
if self.isreachable()==0:
self.setreachableprobability()
lowerProbability=max(self.lproba[1,subset[:]==1].sum(),1-self.lproba[0,subset[:]==0].sum())
return lowerProbability
def getupperprobability(self,subset):
"""Compute upper probability of an event expressed in binary code.
:param subset: the event of interest (a 1xn vector containing 1 for elements
in the event, 0 otherwise.)
:param type: np.array
:returns: upper probability value
:rtype: float
"""
if subset.__class__.__name__!='ndarray':
raise Exception('Expecting a numpy array as argument')
if subset.size != self.nbDecision:
raise Exception('Subset incompatible with the frame size')
if self.isreachable()==0:
self.setreachableprobability()
upperProbability=min(self.lproba[0,subset[:]==1].sum(),1-self.lproba[1,subset[:]==0].sum())
return upperProbability
def getlowerexpectation(self,function):
"""Compute the lower expectation of a given (bounded) function by using
the Choquet integral
:param function: the function values
:param type: np.array
:returns: lower expectation value
:rtype: float
"""
lowerexpe=0.
if function.__class__.__name__!='ndarray':
raise Exception('Expecting a numpy array as argument')
if function.size != self.nbDecision:
raise Exception('number of elements incompatible with the frame size')
function=function.astype(float)
sortedf=np.sort(function)
indexedf=np.argsort(function)
lowerexpe=lowerexpe+sortedf[0]
for i in range(self.nbDecision)[1:]:
addedval=sortedf[i]-sortedf[i-1]
event=np.zeros(self.nbDecision)
event[indexedf[i:]]=1
lowerexpe=lowerexpe+addedval*self.getlowerprobability(event)
return lowerexpe
def isreachable(self):
"""Check if the probability intervals are reachable (are coherent)
:returns: 0 (not coherent/tight) or 1 (tight/coherent).
:rtype: integer
"""
for i in range(self.nbDecision):
subset=np.ones(self.nbDecision)
subset[i]=0
if self.lproba[0,i] + self.lproba[1,subset[:]==1].sum() > 1.0:
return 0
if self.lproba[1,i] + self.lproba[0,subset[:]==1].sum() < 1.0:
return 0
return 1
def setreachableprobability(self):
"""Make the bounds reachable.
"""
if self.isproper()==1:
lreachableProba=np.zeros((2,self.nbDecision))
for i in range(self.nbDecision):
subset=np.ones(self.nbDecision)
subset[i]=0
lb=max(self.lproba[1,i],1-self.lproba[0,subset[:]==1].sum())
ub=min(self.lproba[0,i],1-self.lproba[1,subset[:]==1].sum())
lreachableProba[1,i]=lb
lreachableProba[0,i]=ub
self.lproba[:]=lreachableProba[:]
else:
raise Exception('intervals inducing empty set: operation not possible')
def __str__(self):
"""Print the current bounds
"""
str1,str2="upper bound |","lower bound |"
str3=" "
i=0
for interval in range(self.nbDecision):
str3+=" y%d " %i
str1+=" %.3f" % self.lproba[0,interval]
str2+=" %.3f" % self.lproba[1,interval]
i+=1
str3+="\n"
str3+=" "
str3+="--------------------"
str3+="\n"
str3+=str1
str3+="\n"
str3+=str2
str3+="\n"
return str3
def __and__(self,other):
"""Compute the intersection of two probability intervals
"""
mini=np.maximum(self.lproba[1,:],other.lproba[1,:])
maxi=np.minimum(self.lproba[0,:],other.lproba[0,:])
if mini.sum() >= 0.9999999 or maxi.sum() <= 0.9999999:
raise Exception('empty intersection')
for i in range(self.nbDecision):
if mini[i] >= maxi[i] - 0.0000001:
raise Exception('empty intersection')
fusedproba=np.zeros((2,self.nbDecision))
fusedproba[1,:]=mini
fusedproba[0,:]=maxi
result=IntervalsProbability(fusedproba)
result.setreachableprobability()
return result
def __or__(self,other):
"""Compute the union of two probability intervals
"""
fusedproba=np.zeros((2,self.nbDecision))
fusedproba[1,:]=np.minimum(self.lproba[1,:],other.lproba[1,:])
fusedproba[0,:]=np.maximum(self.lproba[0,:],other.lproba[0,:])
result=IntervalsProbability(fusedproba)
return result
def __add__(self,other):
"""Compute the average of two probability intervals
"""
fusedproba=np.zeros((2,self.nbDecision))
fusedproba[1,:]=np.mean([self.lproba[1,:],other.lproba[1,:]],axis=0)
fusedproba[0,:]=np.mean([self.lproba[0,:],other.lproba[0,:]],axis=0)
result=IntervalsProbability(fusedproba)
return result
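# Illustrative sketch, not part of the original module: the Choquet integral
# used by getlowerexpectation, evaluated on the reachable bounds shown in the
# class docstring. The gain vector [1., 0., 2.] is an assumption for the example.
def _example_lower_expectation():  # pragma: no cover
    lproba = np.array([[0.7, 0.5, 0.2], [0.4, 0.2, 0.1]])
    intprob = IntervalsProbability(lproba)
    print(intprob.getlowerexpectation(np.array([1.0, 0.0, 2.0])))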
|
gpl-2.0
| -5,486,011,638,803,645,000
| 36.322034
| 103
| 0.557221
| false
| 3.814638
| false
| false
| false
|
jeromeku/Python-Financial-Tools
|
portfolio.py
|
1
|
8295
|
# portfolio.py This class represents a portfolio of stocks. It supports optimization
# of assets via a quadratic program.
#
# The following is an example of how the portfolio class may be used to represent a
# portfolio of assets representing major technology companies:
# portfolio = Portfolio(["MSFT","GOOG","IBM"])
# print "The value at risk: %.2f" % portfolio.calculate_parametric_risk(.05,1000)
# print "The expected shortfall: %.2f" % portfolio.calculate_parametric_risk(.05,1000,True)
import numpy as np
from stock import Stock
from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt import solvers
from scipy import stats
from pprint import pprint
solvers.options["show_progress"] = False
class Portfolio(object):
def __init__(self,assets,risk_free = None,position = None):
# The position refers to the dollar amount invested into this particular
# portfolio. The position can be allocated so that it corresponds to the
# portfolio with the maximum sharpe's ratio, or to the portfolio with the
# minimum risk.
self.position = position if position is not None else None
self.assets = [Stock(stock["ticker"],stock["date_range"]) if type(stock) is dict else Stock(stock) for stock in assets]
if risk_free is not None:
self.risk_free = Stock(risk_free["ticker"],risk_free["date_range"]) if type(risk_free) is dict else Stock(risk_free)
else:
self.risk_free = Stock("^IRX")
self.n = len(self.assets)
self.statistics = self.calculate_statistics()
self.optimization = self.optimize_portfolio()
self.returns = self.calculate_portfolio_returns()
def __str__(self):
print_string = "Assets in portfolio: [" + " ".join([asset.ticker for asset in self.assets]) + "]\n\n"
for asset in self.assets:
print_string += asset.__str__() + "\n\n"
print_string += "The weights for each asset in the portfolio:\n"
for i in range(self.n):
print_string += "\t" + self.assets[i].ticker + "\t: " + str(self.optimization["max_sharpe_weights"][i][0]) + "\n"
print_string += "\nExpected return: %.4f" % self.returns
return print_string
def calculate_portfolio_returns(self):
returns = 0.0
for i in range(self.n):
returns += self.assets[i].statistics["expected_return"] * self.optimization["max_sharpe_weights"][i][0]
return returns
def calculate_statistics(self):
statistics = {}
returns = np.zeros((len(self.assets[0].statistics["returns"]),self.n))
for i in range(self.n):
returns[:,i] = self.assets[i].statistics["returns"]
statistics["expected_asset_returns"] = np.array([asset.statistics["expected_return"] for asset in self.assets])
statistics["covariance"] = np.cov(returns,rowvar = 0)
# Due to the behavior of the numpy "diag" function, scalar inputs will fail and
# produce an error. This instance occurs when there is only a single asset in the
# portfolio. In this case, simply exclude the call to "diag" and calculate the
# standard deviation and the square root of a scalar covariance "matrix".
if statistics["covariance"].shape == ():
statistics["standard_deviation"] = np.sqrt(statistics["covariance"])
else:
statistics["standard_deviation"] = np.sqrt(np.diag(statistics["covariance"]))
return statistics
def calculate_parametric_risk(self,alpha,expected_shortfall = False,position = None):
if position is None and self.position is not None:
position = self.position
elif position is None and self.position is None:
print "Either specify a position for the portfolio object or provide one as an input parameter."
return np.nan
mu = self.statistics["expected_asset_returns"]
S = self.statistics["covariance"]
w = self.optimization["max_sharpe_weights"]
portfolio_mu = np.dot(mu,w)
portfolio_sigma = np.sqrt(np.dot(np.dot(w.T,S),w))[0]
quantile = stats.norm.ppf(alpha)
if expected_shortfall:
risk = position * (-portfolio_mu + portfolio_sigma * (stats.norm.pdf(quantile) / alpha))
else:
risk = -position * (portfolio_mu + quantile * portfolio_sigma)
return risk
def optimize_kelly_criterion(self):
# This code attempts to reproduce the optimization routine proposed by
# Vasily Nekrasov using the Kelly criterion. In particular, this code
# uses as reference the following work:
#
# Nekrasov, Vasily. 2013. "Kelly Criterion for Multivariate Portfolios:
# A Model-Free Approach".
kelly_optimization = {}
n = self.n
r = self.risk_free.statistics["expected_daily_return"]
S = matrix(1.0 / ((1 + r) ** 2) * self.statistics["covariance"])
r_assets = matrix([asset.statistics["expected_daily_return"] for asset in self.assets])
q = matrix(1.0 / (1 + r) * (r_assets - r))
G, h, A, b = self.optimization_constraint_matrices()
# Notice that the "linear" term in the quadratic optimization formulation is made
# negative. This is because Nekrasov maximizes the function, whereas CXVOPT is forced
# to minimize. By making the linear term negative, we arrive at an equivalent
# formulation.
portfolio_weights = solvers.qp(S,-q,G,h,A,b)["x"]
kelly_optimization["weights"] = np.array([portfolio_weights[i] for i in range(n)])
return kelly_optimization
def optimize_portfolio(self):
optimization = {}
n = self.n
S = matrix(2 * self.statistics["covariance"])
expected_returns = matrix(self.statistics["expected_asset_returns"])
G, h, A, b = self.optimization_constraint_matrices()
mu_array = [10**(5.0*t/100-1.0) for t in range(100)]
portfolio_weights = [solvers.qp(mu*S,-expected_returns,G,h,A,b)["x"] for mu in mu_array]
returns = [dot(expected_returns,w) for w in portfolio_weights]
risk = [np.sqrt(dot(w,S*w)) for w in portfolio_weights]
# Calculate the portfolio with the greatest "reward-to-risk" ratio, which
# is Sharpe's ratio. Notice that it is not necessary to specify the risk
# free rate in the calculation of Sharpe's ratio, as without loss of generality
# it may be assumed to be zero. In either case, the same portfolio will
# achieve the maximum. However, since the risk free asset defaults to a
# Treasury bill, we take no action regarding this observation.
mu_free = self.risk_free.statistics["expected_return"]
sharpe_ratio = (returns - mu_free) / risk
max_sharpe_index = sharpe_ratio == max(sharpe_ratio)
min_variance_index = risk == min(risk)
optimization["returns"] = returns
optimization["risk"] = risk
# If possible, try to decrease the number of for loops used to extract the
# optimal weights of the portfolio. At the time of writing this, it seems
# that the matrix data structure is somewhat bizarre. Therefore, in order to
# generate the desired numpy array object, so many for loops turned out to
# be necessary.
max_sharpe_weights = [portfolio_weights[i] for i in range(len(portfolio_weights)) if max_sharpe_index[i]]
min_variance_weights = [portfolio_weights[i] for i in range(len(portfolio_weights)) if min_variance_index[i]]
optimization["max_sharpe_weights"] = np.zeros((n,1))
optimization["min_variance_weights"] = np.zeros((n,1))
for i in range(len(max_sharpe_weights[0])):
optimization["max_sharpe_weights"][i] = max_sharpe_weights[0][i]
for i in range(len(min_variance_weights[0])):
optimization["min_variance_weights"][i] = min_variance_weights[0][i]
return optimization
def optimization_constraint_matrices(self):
n = self.n
G = matrix(0.0, (n,n))
G[::n+1] = -1.0
h = matrix(0.0, (n,1))
A = matrix(1.0, (1,n))
b = matrix(1.0)
return G, h, A, b
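# Illustrative sketch, not part of the original module: reading the optimized
# weights and parametric risk figures off a constructed portfolio. The tickers
# and the 1000-dollar position are assumptions; constructing Portfolio fetches
# price data through the Stock class.
def _example_report():  # pragma: no cover
    portfolio = Portfolio(["MSFT", "GOOG", "IBM"], position=1000)
    print portfolio
    print "95%% value at risk: %.2f" % portfolio.calculate_parametric_risk(.05)
    print "95%% expected shortfall: %.2f" % portfolio.calculate_parametric_risk(
        .05, expected_shortfall=True)
    print "Kelly weights:", portfolio.optimize_kelly_criterion()["weights"]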
|
mit
| -9,167,423,260,467,966,000
| 43.837838
| 128
| 0.639662
| false
| 3.773885
| false
| false
| false
|
qliu/globe_nocturne
|
globenocturne/globenocturneapp/migrations/0002_auto_20150416_1956.py
|
1
|
2732
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('globenocturneapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DMSPDataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('wms_layer', models.CharField(max_length=100, verbose_name=b'WMS Layer')),
],
options={
'db_table': 'dmsp_dataset',
'verbose_name': 'DMSP Dataset',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DMSPProduct',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'db_table': 'dmsp_product',
'verbose_name': 'DMSP Product',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Satellite',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=10)),
],
options={
'db_table': 'satellite',
'verbose_name': 'Satellite',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SatYear',
fields=[
('year', models.IntegerField(serialize=False, primary_key=True)),
],
options={
'db_table': 'sat_year',
'verbose_name': 'Year',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='dmspdataset',
name='product',
field=models.ForeignKey(verbose_name=b'DMSP Product', to='globenocturneapp.DMSPProduct'),
preserve_default=True,
),
migrations.AddField(
model_name='dmspdataset',
name='satellite',
field=models.ForeignKey(verbose_name=b'Satellite', to='globenocturneapp.Satellite'),
preserve_default=True,
),
migrations.AddField(
model_name='dmspdataset',
name='year',
field=models.ForeignKey(verbose_name=b'Year', to='globenocturneapp.SatYear'),
preserve_default=True,
),
]
|
gpl-2.0
| 2,225,764,835,229,984,000
| 33.15
| 114
| 0.504758
| false
| 4.385233
| false
| false
| false
|
SEMAFORInformatik/femagtools
|
examples/calculation/ld_lq_fast.py
|
1
|
3356
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ld-Lq-Identification with Femag
"""
import os
import femagtools
import femagtools.machine
import logging
import numpy as np
feapars = {
"num_move_steps": 25,
"calculationMode": "ld_lq_fast",
"magn_temp": 60.0,
"i1_max": 150.0,
"beta_max": 0.0,
"beta_min": -60.0,
"num_cur_steps": 3,
"num_beta_steps": 4,
"skew_angle": 0.0,
"num_par_wdgs": 1,
"speed": 50.0
}
magnetMat = [{
"name": "M395",
"remanenc": 1.17,
"temcoefbr": -0.001,
"spmaweight": 7.5,
"magntemp": 20.0,
"temcoefhc": -0.001,
"hcb": 810000.4,
"relperm": 1.05,
"magncond": 833333,
"magnwidth": 15.0e-3,
"magnlength": 100.0e-3,
"hc_min": 760000.0}
]
magnetizingCurve = "../magnetcurves"
pmMotor = {
"name": "PM 270 L8",
"desc": "PM Motor 270mm 8 poles VMAGN",
"poles": 8,
"outer_diam": 0.26924,
"bore_diam": 0.16192,
"inner_diam": 0.11064,
"airgap": 0.00075,
"lfe": 0.08356,
"stator": {
"num_slots": 48,
"num_slots_gen": 12,
"mcvkey_yoke": "M330-50A",
"nodedist": 4.0,
"statorRotor3": {
"slot_height": 0.0335,
"slot_h1": 0.001,
"slot_h2": 0.0,
"slot_width": 0.00193,
"slot_r1": 0.0001,
"slot_r2": 0.00282,
"wedge_width1": 0.00295,
"wedge_width2": 0.0,
"middle_line": 0.0,
"tooth_width": 0.0,
"slot_top_sh": 0.0}
},
"magnet": {
"nodedist": 1.0,
"material": "M395",
"mcvkey_yoke": "M330-50A",
"magnetIronV": {
"magn_angle": 145.0,
"magn_height": 0.00648,
"magn_width": 0.018,
"condshaft_r": 0.05532,
"magn_num": 1.0,
"air_triangle": 1,
"iron_hs": 0.0001,
"gap_ma_iron": 0.0002,
"iron_height": 0.00261,
"magn_rem": 1.2,
"iron_shape": 0.0802
}
},
"windings": {
"num_phases": 3,
"num_layers": 1,
"num_wires": 9,
"coil_span": 6.0,
"cufilfact": 0.4,
"culength": 1.4,
"slot_indul": 0.5e-3
}
}
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
workdir = os.path.join(os.path.expanduser('~'), 'femag')
try:
os.makedirs(workdir)
except OSError:
pass
femag = femagtools.Femag(workdir,
magnetizingCurves=magnetizingCurve,
magnets=magnetMat)
r = femag(pmMotor, feapars)
print(r.type)
# find speed at u1max
u1max = 340
tq = 170
ld = r.ldq['ld']
lq = r.ldq['lq']
i1 = r.ldq['i1']
beta = r.ldq['beta']
psim = r.ldq['psim']
p = r.machine['p']
r1 = 0.0
pm = femagtools.machine.PmRelMachineLdq(3, p,
psim,
ld,
lq,
r1,
beta,
i1)
tq = 170.0
u1 = 340.0
iqx, idx = pm.iqd_torque(tq)
w1 = pm.w1_u(u1, iqx, idx)
betaopt, i1 = femagtools.machine.betai1(iqx, idx)
print("f1 {0:8.1f} Hz, I1 {1:8.1f} A, Beta {2:4.1f} °".format(
w1/2/np.pi, i1, betaopt/np.pi*180))
|
bsd-2-clause
| 4,781,478,290,324,042,000
| 22.298611
| 63
| 0.467064
| false
| 2.690457
| false
| true
| false
|
Symmetric/calico-docker
|
calico_containers/calico_ctl/status.py
|
1
|
2475
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
calicoctl status
Description:
Print current status information regarding calico-node container
and the BIRD routing daemon.
"""
import re
from utils import docker_client
def status(arguments):
"""
Main dispatcher for status commands. Calls the corresponding helper
function.
:param arguments: A dictionary of arguments already processed through
this file's docstring with docopt
:return: None
"""
calico_node_info = filter(lambda container: "/calico-node" in
container["Names"],
docker_client.containers())
if len(calico_node_info) == 0:
print "calico-node container not running"
else:
print "calico-node container is running. Status: %s" % \
calico_node_info[0]["Status"]
apt_cmd = docker_client.exec_create("calico-node", ["/bin/bash", "-c",
"apt-cache policy calico-felix"])
result = re.search(r"Installed: (.*?)\s", docker_client.exec_start(apt_cmd))
if result is not None:
print "Running felix version %s" % result.group(1)
print "IPv4 Bird (BGP) status"
bird_cmd = docker_client.exec_create("calico-node",
["/bin/bash", "-c",
"echo show protocols | "
"birdc -s /etc/service/bird/bird.ctl"])
print docker_client.exec_start(bird_cmd)
print "IPv6 Bird (BGP) status"
bird6_cmd = docker_client.exec_create("calico-node",
["/bin/bash", "-c",
"echo show protocols | "
"birdc6 -s "
"/etc/service/bird6/bird6.ctl"])
print docker_client.exec_start(bird6_cmd)
|
apache-2.0
| 4,725,994,529,181,571,000
| 38.919355
| 84
| 0.585859
| false
| 4.245283
| false
| false
| false
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/h/hdf5.py
|
1
|
4242
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing HDF5, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_HDF5(ConfigureMake):
"""Support for building/installing HDF5"""
def configure_step(self):
"""Configure build: set require config and make options, and run configure script."""
# configure options for dependencies
deps = [
("Szip", "--with-szlib"),
("zlib", "--with-zlib"),
]
for (dep, opt) in deps:
root = get_software_root(dep)
if root:
self.cfg.update('configopts', '%s=%s' % (opt, root))
fcomp = 'FC="%s"' % os.getenv('F90')
self.cfg.update('configopts', "--with-pic --with-pthread --enable-shared")
self.cfg.update('configopts', "--enable-cxx --enable-fortran %s" % fcomp)
# enabling MPI together with C++ support requires --enable-unsupported, because this combination is untested by HDF5
# also returns False if MPI is not supported by this toolchain
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', "--enable-unsupported --enable-parallel")
else:
self.cfg.update('configopts', "--disable-parallel")
# make options
self.cfg.update('buildopts', fcomp)
# set RUNPARALLEL if MPI is not enabled (or not supported by this toolchain)
if self.toolchain.options.get('usempi', None):
env.setvar('RUNPARALLEL', 'mpirun -np \$\${NPROCS:=2}')
super(EB_HDF5, self).configure_step()
# default make and make install are ok
def sanity_check_step(self):
"""
Custom sanity check for HDF5
"""
# also returns False if MPI is not supported by this toolchain
if self.toolchain.options.get('usempi', None):
extra_binaries = ["bin/%s" % x for x in ["h5perf", "h5pcc", "h5pfc", "ph5diff"]]
else:
extra_binaries = ["bin/%s" % x for x in ["h5cc", "h5fc"]]
libs = ['', '_cpp', '_fortran', '_hl_cpp', '_hl', 'hl_fortran']
shlib_ext = get_shared_lib_ext()
custom_paths = {
'files': ["bin/h5%s" % x for x in ["2gif", "c++", "copy", "debug", "diff",
"dump", "import", "jam","ls", "mkgrp",
"perf_serial", "redeploy", "repack",
"repart", "stat", "unjam"]] +
["bin/gif2h5"] + extra_binaries +
["lib/libhdf5%s.%s" % (l, shlib_ext) for l in libs],
'dirs': ['include'],
}
super(EB_HDF5, self).sanity_check_step(custom_paths=custom_paths)
|
gpl-2.0
| 1,027,973,136,351,787,100
| 39.788462
| 101
| 0.621405
| false
| 3.613288
| true
| false
| false
|
leahrnh/ticktock_text_api
|
prepare_data_user_input.py
|
1
|
1698
|
#!/usr/bin/env python
import sys
import time
import os
import json
import pickle
def readfile(fn):
result = {}
result["Turns"] = {}
current_turn = 0
key_index = 0
keys = ["Turn", "You", "TickTock", "Appropriateness"]
for l in open(fn):
if ":" in l:
key = l.split(":")[0]
value = ":".join(l.split(":")[1:]).strip()
if key == "TurkID" or key == "UserID":
result[key] = value
else:
if keys[key_index%4] != key:
print l
assert(False)
key_index += 1
if key == "Turn":
current_turn = int(value)
result["Turns"][current_turn] = {}
elif key in keys[1:4]:
result["Turns"][current_turn][key] = value
else:
assert(False)
return result
def readall(dir_path):
result = {}
for f in os.listdir(dir_path):
print f
if ".txt" in f and "rating" in f:
full_path = os.path.join(dir_path, f)
result[full_path] = readfile(full_path)
return result
def get_log(rating_logs):
writelist =[]
for f,r in rating_logs.iteritems():
num_turns = len(r["Turns"])
for i in range(1, num_turns + 1):
tmpdict ={}
tmpdict["question"]= r["Turns"][i]["You"]
tmpdict["answer"] = r["Turns"][i]["TickTock"]
tmpdict["app_value"]=r["Turns"][i]["Appropriateness"]
tmpdict["user_id"]=r["TurkID"]
#tmpdict["aSentId"]=2016
writelist.append(tmpdict)
return writelist
rating_logs = readall("/home/ubuntu/zhou/Backend/rating_log/v2")
writelist = get_log(rating_logs)
with open('user_input_v2.txt','w') as f:
for tmpdict in writelist:
f.write(tmpdict["question"]+'\n')
with open('user_input_v2.pkl','w') as f:
pickle.dump(writelist,f)
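# Illustrative sketch, not part of the original script: the sample text below
# shows the on-disk format that readfile() expects -- a TurkID/UserID header
# followed by repeating Turn/You/TickTock/Appropriateness blocks. The ID, the
# dialogue lines and the "sample_rating.txt" path are assumptions invented for
# this demo.
def _demo_readfile():
    sample = ("TurkID: A1B2C3\n"
              "Turn: 1\n"
              "You: hello there\n"
              "TickTock: hi, how are you?\n"
              "Appropriateness: 3\n")
    with open("sample_rating.txt", "w") as tmp:
        tmp.write(sample)
    parsed = readfile("sample_rating.txt")
    # parsed == {"TurkID": "A1B2C3",
    #            "Turns": {1: {"You": "hello there",
    #                          "TickTock": "hi, how are you?",
    #                          "Appropriateness": "3"}}}
    return parsed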
|
gpl-2.0
| -2,080,913,587,433,048,600
| 24.727273
| 64
| 0.585984
| false
| 2.97373
| false
| false
| false
|
un-brs/mfiles-tools
|
mfiles_project/mfiles_sync/management/commands/mfcache.py
|
1
|
9571
|
# -*- coding: utf-8 -*-
import datetime
import calendar
from mfiles_sync.models import (Vault, Document, DocumentView, PropertyDef,
Property, DocumentProperty)
from django.conf import settings
from django.core.management.base import BaseCommand
from win32com.client import gencache
mfiles = gencache.EnsureModule(
'{B9C079AA-92DD-4FB4-A0E0-AA3198955B45}', 0, 1, 0
)
def add_months(sourcedate, months):
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
# print(year, month)
day = min(sourcedate.day, calendar.monthrange(year, month)[1])
return datetime.date(year, month, day)
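# Illustrative only (not part of the original command): a quick check of how
# add_months() advances the month windows used by process_view() below.
def _demo_add_months():
    assert add_months(datetime.date(2014, 12, 1), 1) == datetime.date(2015, 1, 1)
    # the day is clamped to the length of the target month
    assert add_months(datetime.date(2015, 1, 31), 1) == datetime.date(2015, 2, 28)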
class Command(BaseCommand):
    help = 'Synchronize MFiles'
def add_arguments(self, parser):
pass
def get_server_vaults(self):
server = mfiles.MFilesServerApplication()
server.Connect(AuthType=mfiles.constants.MFAuthTypeSpecificMFilesUser,
UserName=settings.MFILES_USERNAME,
Password=settings.MFILES_PASSWORD,
NetworkAddress=settings.MFILES_HOST,
Endpoint="2266")
return server.GetVaults()
def process_valuelist(self, db_pdef, mfiles_valuelist):
for mfiles_item in mfiles_valuelist:
db_prop = Property(
mfiles_display_id=mfiles_item.DisplayID, pdef=db_pdef)
db_prop.set_value(mfiles_item.Name)
db_prop.save()
def process_propertydef(self, mfiles_pdef, mfiles_vault, db_vault):
db_pdefs = list(
PropertyDef.objects.filter(
mfiles_id=mfiles_pdef.ID, vault=db_vault)
)
if db_pdefs:
db_pdef = db_pdefs[0]
else:
db_pdef = PropertyDef(
name=mfiles_pdef.Name,
mfiles_id=mfiles_pdef.ID,
vault=db_vault,
dtype=mfiles_pdef.DataType
)
db_pdef.save()
if mfiles_pdef.ValueList:
mfiles_valuelist = (
mfiles_vault.ValueListItemOperations.GetValueListItems(
mfiles_pdef.ValueList
)
)
self.process_valuelist(
db_pdef=db_pdef,
mfiles_valuelist=mfiles_valuelist
)
return db_pdef
def process_property(self, mfiles_typedvalue, db_pdef, db_doc):
if db_pdef.dtype in (PropertyDef.MFDatatypeMultiSelectLookup,
PropertyDef.MFDatatypeLookup):
for lookup in mfiles_typedvalue.GetValueAsLookups():
db_props = list(
db_pdef.property_set.filter(
mfiles_display_id=lookup.DisplayID
)
)
if db_props:
db_prop = db_props[0]
db_docprop = DocumentProperty(doc=db_doc, prop=db_prop)
db_docprop.save()
else:
if mfiles_typedvalue.Value:
db_prop = Property(pdef=db_pdef)
db_prop.set_value(mfiles_typedvalue.Value)
db_prop.save()
db_docprop = DocumentProperty(doc=db_doc, prop=db_prop)
db_docprop.save()
def process_properties(self, mfiles_props, mfiles_vault, db_vault, db_doc):
for mfiles_prop in mfiles_props:
mfiles_pdef = mfiles_vault.PropertyDefOperations.GetPropertyDef(
mfiles_prop.PropertyDef
)
db_pdef = self.process_propertydef(
mfiles_pdef=mfiles_pdef,
mfiles_vault=mfiles_vault,
db_vault=db_vault
)
self.process_property(
mfiles_typedvalue=mfiles_prop.Value,
db_pdef=db_pdef,
db_doc=db_doc
)
def process_object_version(self, mfiles_vault, object_version, db_view,
db_vault):
if object_version.FilesCount != 1:
self.stderr.write(
"'%s' does not contains files" % object_version.Title
)
return
file = object_version.Files.Item(1)
db_doc = Document(
mfiles_id=object_version.ObjVer.ID,
vault=db_vault,
name=file.Title,
ext=file.Extension,
size=file.LogicalSize,
created=object_version.CreatedUtc,
modified=object_version.LastModifiedUtc
)
db_doc.save()
self.stdout.write("Process document '%s.%s'" %
(db_doc.name, db_doc.ext)
)
db_docview = DocumentView(doc=db_doc, view=db_view)
db_docview.save()
mfiles_props = (
mfiles_vault.ObjectOperations.GetObjectVersionAndProperties(
object_version.ObjVer
).Properties
)
self.process_properties(
mfiles_vault=mfiles_vault,
mfiles_props=mfiles_props,
db_vault=db_vault,
db_doc=db_doc
)
def process_view(self, mfiles_vault, mfiles_view, db_view, db_vault):
self.stdout.write(str(db_view))
db_view.condition = (
mfiles_view.SearchConditions.GetAsExportedSearchString(
mfiles.constants.MFSearchFlagReturnLatestVisibleVersion
)
)
db_view.save()
conditions = mfiles_view.SearchConditions
df_date = mfiles.DataFunctionCall()
df_date.SetDataDate()
# ======================================================================
search = mfiles.SearchCondition()
expression = mfiles.Expression()
value = mfiles.TypedValue()
expression.SetPropertyValueExpression(
mfiles.constants.MFBuiltInPropertyDefLastModified,
mfiles.constants.MFParentChildBehaviorNone,
df_date
)
# value.SetValue(mfiles.constants.MFDatatypeDate, '15/12/2014')
search.Set(
expression,
mfiles.constants.MFConditionTypeGreaterThanOrEqual,
value
)
conditions.Add(-1, search)
# ======================================================================
search = mfiles.SearchCondition()
expression = mfiles.Expression()
# value = mfiles.TypedValue()
expression.SetPropertyValueExpression(
mfiles.constants.MFBuiltInPropertyDefLastModified,
mfiles.constants.MFParentChildBehaviorNone,
df_date
)
# value.SetValue(mfiles.constants.MFDatatypeDate, '15/12/2014')
search.Set(
expression, mfiles.constants.MFConditionTypeLessThan, value
)
conditions.Add(-1, search)
# ======================================================================
start = datetime.date(2014, 12, 1)
end = add_months(start, 1)
while start < datetime.date.today():
print("Process date range", start, end)
conditions.Item(conditions.Count - 1).TypedValue.SetValue(
mfiles.constants.MFDatatypeDate, start.strftime('%d/%m/%Y')
)
conditions.Item(conditions.Count).TypedValue.SetValue(
mfiles.constants.MFDatatypeDate, end.strftime('%d/%m/%Y')
)
objs = (
mfiles_vault.ObjectSearchOperations.
SearchForObjectsByConditionsEx(
conditions,
mfiles.constants.MFSearchFlagReturnLatestVisibleVersion,
False,
0
)
)
for object_version in objs:
self.process_object_version(
mfiles_vault=mfiles_vault,
object_version=object_version,
db_view=db_view,
db_vault=db_vault
)
start, end = end, add_months(start, 1)
def process_vault(self, mfiles_vault, db_vault):
self.stdout.write('Vault %s %s' % (db_vault.name,
mfiles_vault.GetGUID()))
db_vault.guid = mfiles_vault.GetGUID()
db_vault.save()
mfiles_views = {
v.Name: v for v in mfiles_vault.ViewOperations.GetViews()
}
for db_view in db_vault.view_set.filter(is_enabled=True):
mfiles_view = mfiles_views.get(db_view.name)
if mfiles_view:
self.process_view(
mfiles_vault=mfiles_vault,
mfiles_view=mfiles_view,
db_view=db_view,
db_vault=db_vault
)
else:
self.stdout.write("Could not find view '%s'" % db_view.name)
def handle(self, *args, **options):
mfiles_svaults = {v.Name: v for v in self.get_server_vaults()}
for db_vault in Vault.objects.filter(is_enabled=True):
mfiles_svault = mfiles_svaults.get(db_vault.name)
if mfiles_svault:
mfiles_vault = mfiles_svault.LogIn()
if mfiles_vault.LoggedIn:
self.process_vault(mfiles_vault, db_vault)
else:
self.stderr.write("Could not login to '%s' vault " %
db_vault.name)
else:
self.stderr.write("Could not find vault %s" % db_vault.name)
|
mit
| 4,169,048,206,284,975,000
| 34.579926
| 80
| 0.532024
| false
| 4.088424
| false
| false
| false
|
bolkedebruin/airflow
|
airflow/hooks/dbapi_hook.py
|
1
|
10860
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Optional
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
def connect(host, port, username, schema):
...
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None # type: Optional[str]
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
uri = '{conn.conn_type}://{login}{host}/'.format(
conn=conn, login=login, host=host)
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, str):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if parameters is not None:
self.log.info("{} with parameters {}".format(s, parameters))
cur.execute(s, parameters)
else:
self.log.info(s)
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr)
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
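# Illustrative sketch, not part of Airflow: a minimal concrete hook built on
# DbApiHook. "_FakeConnector" and "fake_db_default" are invented names used
# only to show which attributes a real subclass is expected to override.
class _FakeConnector:
    """Stand-in connector exposing the connect() signature DbApiHook expects."""
    @staticmethod
    def connect(host, port, username, schema):
        raise NotImplementedError("replace with a real DB-API driver")
class _FakeDbHook(DbApiHook):
    conn_name_attr = 'fake_conn_id'        # attribute that stores the connection id
    default_conn_name = 'fake_db_default'  # Airflow connection id used as a fallback
    supports_autocommit = True             # pretend the driver supports autocommit
    connector = _FakeConnector             # object whose connect() returns a DB-API connection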
|
apache-2.0
| 3,902,563,915,036,066,000
| 35.079734
| 85
| 0.580203
| false
| 4.580346
| false
| false
| false
|
gotcha/testcounter
|
counterpartylib/lib/messages/dividend.py
|
1
|
8391
|
#! /usr/bin/python3
"""Pay out dividends."""
import struct
import decimal
D = decimal.Decimal
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import (config, exceptions, util)
FORMAT_1 = '>QQ'
LENGTH_1 = 8 + 8
FORMAT_2 = '>QQQ'
LENGTH_2 = 8 + 8 + 8
ID = 50
def initialise (db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS dividends(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
asset TEXT,
dividend_asset TEXT,
quantity_per_unit INTEGER,
fee_paid INTEGER,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON dividends (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON dividends (source)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
asset_idx ON dividends (asset)
''')
def validate (db, source, quantity_per_unit, asset, dividend_asset, block_index):
cursor = db.cursor()
problems = []
if asset == config.BTC:
problems.append('cannot pay dividends to holders of {}'.format(config.BTC))
if asset == config.XCP:
if (not block_index >= 317500) or block_index >= 320000 or config.TESTNET: # Protocol change.
problems.append('cannot pay dividends to holders of {}'.format(config.XCP))
if quantity_per_unit <= 0: problems.append('non‐positive quantity per unit')
# Examine asset.
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY tx_index ASC''', ('valid', asset)))
if not issuances:
problems.append('no such asset, {}.'.format(asset))
return None, None, problems, 0
divisible = issuances[0]['divisible']
# Only issuer can pay dividends.
if block_index >= 320000 or config.TESTNET: # Protocol change.
if issuances[-1]['issuer'] != source:
problems.append('only issuer can pay dividends')
# Examine dividend asset.
if dividend_asset in (config.BTC, config.XCP):
dividend_divisible = True
else:
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?)''', ('valid', dividend_asset)))
if not issuances:
problems.append('no such dividend asset, {}.'.format(dividend_asset))
return None, None, problems, 0
dividend_divisible = issuances[0]['divisible']
# Calculate dividend quantities.
holders = util.holders(db, asset)
outputs = []
addresses = []
dividend_total = 0
for holder in holders:
if block_index < 294500 and not config.TESTNET: # Protocol change.
if holder['escrow']: continue
address = holder['address']
address_quantity = holder['address_quantity']
if block_index >= 296000 or config.TESTNET: # Protocol change.
if address == source: continue
dividend_quantity = address_quantity * quantity_per_unit
if divisible: dividend_quantity /= config.UNIT
if not dividend_divisible: dividend_quantity /= config.UNIT
if dividend_asset == config.BTC and dividend_quantity < config.DEFAULT_MULTISIG_DUST_SIZE: continue # A bit hackish.
dividend_quantity = int(dividend_quantity)
outputs.append({'address': address, 'address_quantity': address_quantity, 'dividend_quantity': dividend_quantity})
addresses.append(address)
dividend_total += dividend_quantity
if not dividend_total: problems.append('zero dividend')
if dividend_asset != config.BTC:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, dividend_asset)))
if not balances or balances[0]['quantity'] < dividend_total:
problems.append('insufficient funds ({})'.format(dividend_asset))
fee = 0
if not problems and dividend_asset != config.BTC:
holder_count = len(set(addresses))
if block_index >= 330000 or config.TESTNET: # Protocol change.
fee = int(0.0002 * config.UNIT * holder_count)
if fee:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, config.XCP)))
if not balances or balances[0]['quantity'] < fee:
problems.append('insufficient funds ({})'.format(config.XCP))
cursor.close()
return dividend_total, outputs, problems, fee
def compose (db, source, quantity_per_unit, asset, dividend_asset):
dividend_total, outputs, problems, fee = validate(db, source, quantity_per_unit, asset, dividend_asset, util.CURRENT_BLOCK_INDEX)
if problems: raise exceptions.ComposeError(problems)
logger.info('Total quantity to be distributed in dividends: {} {}'.format(util.value_out(db, dividend_total, dividend_asset), dividend_asset))
if dividend_asset == config.BTC:
return (source, [(output['address'], output['dividend_quantity']) for output in outputs], None)
asset_id = util.get_asset_id(db, asset, util.CURRENT_BLOCK_INDEX)
dividend_asset_id = util.get_asset_id(db, dividend_asset, util.CURRENT_BLOCK_INDEX)
data = struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT_2, quantity_per_unit, asset_id, dividend_asset_id)
return (source, [], data)
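# Illustrative only: how the FORMAT_2 payload built in compose() round-trips
# through struct. The quantity and asset ids below are made-up numbers; real
# ids come from util.get_asset_id().
def _demo_pack_unpack():
    quantity_per_unit, asset_id, dividend_asset_id = 500, 17576, 1
    payload = struct.pack(FORMAT_2, quantity_per_unit, asset_id, dividend_asset_id)
    assert len(payload) == LENGTH_2  # three big-endian unsigned 64-bit integers
    assert struct.unpack(FORMAT_2, payload) == (quantity_per_unit, asset_id, dividend_asset_id)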
def parse (db, tx, message):
dividend_parse_cursor = db.cursor()
# Unpack message.
try:
if (tx['block_index'] > 288150 or config.TESTNET) and len(message) == LENGTH_2:
quantity_per_unit, asset_id, dividend_asset_id = struct.unpack(FORMAT_2, message)
asset = util.get_asset_name(db, asset_id, tx['block_index'])
dividend_asset = util.get_asset_name(db, dividend_asset_id, tx['block_index'])
status = 'valid'
elif len(message) == LENGTH_1:
quantity_per_unit, asset_id = struct.unpack(FORMAT_1, message)
asset = util.get_asset_name(db, asset_id, tx['block_index'])
dividend_asset = config.XCP
status = 'valid'
else:
raise exceptions.UnpackError
except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
dividend_asset, quantity_per_unit, asset = None, None, None
status = 'invalid: could not unpack'
if dividend_asset == config.BTC:
status = 'invalid: cannot pay {} dividends within protocol'.format(config.BTC)
if status == 'valid':
# For SQLite3
quantity_per_unit = min(quantity_per_unit, config.MAX_INT)
dividend_total, outputs, problems, fee = validate(db, tx['source'], quantity_per_unit, asset, dividend_asset, block_index=tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if status == 'valid':
# Debit.
util.debit(db, tx['source'], dividend_asset, dividend_total, action='dividend', event=tx['tx_hash'])
if tx['block_index'] >= 330000 or config.TESTNET: # Protocol change.
util.debit(db, tx['source'], config.XCP, fee, action='dividend fee', event=tx['tx_hash'])
# Credit.
for output in outputs:
util.credit(db, output['address'], dividend_asset, output['dividend_quantity'], action='dividend', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'asset': asset,
'dividend_asset': dividend_asset,
'quantity_per_unit': quantity_per_unit,
'fee_paid': fee,
'status': status,
}
sql='insert into dividends values(:tx_index, :tx_hash, :block_index, :source, :asset, :dividend_asset, :quantity_per_unit, :fee_paid, :status)'
dividend_parse_cursor.execute(sql, bindings)
dividend_parse_cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
mit
| 3,948,772,545,682,078,000
| 42.010256
| 148
| 0.619888
| false
| 3.633882
| true
| false
| false
|
GammaC0de/pyload
|
src/pyload/plugins/decrypters/GoogledriveComFolder.py
|
1
|
5023
|
# -*- coding: utf-8 -*-
import json
from pyload.core.network.http.exceptions import BadHeader
from ..base.decrypter import BaseDecrypter
class GoogledriveComFolder(BaseDecrypter):
__name__ = "GoogledriveComFolder"
__type__ = "decrypter"
__version__ = "0.12"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.)?drive\.google\.com/(?:folderview\?.*id=|drive/(?:.+?/)?folders/)(?P<ID>[-\w]+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
(
"folder_per_package",
"Default;Yes;No",
"Create folder for each package",
"Default",
),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
("dl_subfolders", "bool", "Download subfolders", False),
("package_subfolder", "bool", "Subfolder as a seperate package", False),
]
__description__ = """Drive.google.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
NAME_PATTERN = r"folderName: '(?P<N>.+?)'"
OFFLINE_PATTERN = r"<TITLE>"
API_URL = "https://www.googleapis.com/drive/v3/"
API_KEY = "AIzaSyAcA9c4evtwSY1ifuvzo6HKBkeot5Bk_U4"
def api_response(self, cmd, **kwargs):
kwargs["key"] = self.API_KEY
try:
json_data = json.loads(
self.load("{}{}".format(self.API_URL, cmd), get=kwargs)
)
self.log_debug(f"API response: {json_data}")
return json_data
except BadHeader as exc:
try:
json_data = json.loads(exc.content)
self.log_error(
"API Error: {}".format(cmd),
json_data["error"]["message"],
"ID: {}".format(self.info["pattern"]["ID"]),
"Error code: {}".format(exc.code),
)
except ValueError:
self.log_error(
"API Error: {}".format(cmd),
exc,
"ID: {}".format(self.info["pattern"]["ID"]),
"Error code: {}".format(exc.code),
)
return None
def enum_folder(self, folder_id):
links = []
json_data = self.api_response(
"files",
q="'{}' in parents".format(folder_id),
pageSize=100,
fields="files/id,files/mimeType,nextPageToken",
)
if json_data is None:
self.fail("API error")
if "error" in json_data:
self.fail(json_data["error"]["message"])
for f in json_data.get("files", []):
if f["mimeType"] != "application/vnd.google-apps.folder":
links.append("https://drive.google.com/file/d/" + f["id"])
elif self.config.get("dl_subfolders"):
if self.config.get("package_subfolder"):
links.append("https://drive.google.com/drive/folders/" + f["id"])
else:
links.extend(self.enum_folder(f["id"]))
next_page = json_data.get("nextPageToken", None)
while next_page:
json_data = self.api_response(
"files",
q="'{}' in parents".format(folder_id),
pageToken=next_page,
pageSize=100,
fields="files/id,files/mimeType,nextPageToken",
)
if json_data is None:
self.fail("API error")
if "error" in json_data:
self.fail(json_data["error"]["message"])
for f in json_data.get("files", []):
if f["mimeType"] != "application/vnd.google-apps.folder":
links.append("https://drive.google.com/file/d/" + f["id"])
elif self.config.get("dl_subfolders"):
if self.config.get("package_subfolder"):
links.append(
"https://drive.google.com/drive/folders/" + f["id"]
)
else:
links.extend(self.enum_folder(f["id"]))
next_page = json_data.get("nextPageToken", None)
return links
def decrypt(self, pyfile):
links = []
json_data = self.api_response("files/{}".format(self.info["pattern"]["ID"]))
if json_data is None:
self.fail("API error")
if "error" in json_data:
if json_data["error"]["code"] == 404:
self.offline()
else:
self.fail(json_data["error"]["message"])
pack_name = json_data.get("name", pyfile.package().name)
links = self.enum_folder(self.info["pattern"]["ID"])
if links:
self.packages = [(pack_name, links, pack_name)]
|
agpl-3.0
| -5,063,890,733,911,918,000
| 32.046053
| 118
| 0.495122
| false
| 3.843152
| false
| false
| false
|
mpeyrotc/govector
|
condominios/forms.py
|
1
|
2051
|
# coding=utf-8
from django import forms
import pyodbc
class LoginForm(forms.Form):
username = forms.CharField(label="Nombre de Usuario", max_length=50)
password = forms.CharField(label="Contraseña", widget=forms.PasswordInput)
widgets = {
"password" : forms.PasswordInput(),
}
def clean(self):
cleaned_data = super(LoginForm, self).clean()
username = cleaned_data.get("username")
password = cleaned_data.get("password")
try:
server = 'CINTERMEX2003'
database = 'Condominos'
usernameDB = 'sa'
passwordDB = 'sis2333'
driver = '{ODBC Driver 13 for SQL Server}'
cnxn = pyodbc.connect(
'DRIVER=' + driver + ';PORT=61451;SERVER=' + server + ';PORT=1443;DATABASE=' + database + ';UID=' + usernameDB + ';PWD=' + passwordDB)
cur = cnxn.cursor()
querystring = "SELECT Username, Password, RFC FROM Usuarios WHERE Username = '{username}' AND Password = '{password}'".format(username=username, password=password)
cur.execute(querystring)
nombreusuario = cur.fetchall()
cur.close()
if nombreusuario[0][0] == username and nombreusuario[0][1] == password:
return cleaned_data
else:
raise forms.ValidationError("Usuario o contraseña invalidos")
except:
raise forms.ValidationError("El nombre de usuario no existe")
return cleaned_data
def clean_username(self):
username = self.cleaned_data.get("username")
if not username:
raise forms.ValidationError("Proporcione un nombre de usuario")
return username
def clean_password(self):
password = self.cleaned_data.get("password")
if not password:
raise forms.ValidationError("Proporcione una contraseña")
return password
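# Illustrative sketch, not part of the original form: the same lookup written
# with pyodbc's "?" parameter placeholders instead of interpolating raw user
# input into the SQL string. The cursor is assumed to come from the same
# pyodbc connection opened in clean() above.
def _parameterized_lookup(cur, username, password):
    cur.execute(
        "SELECT Username, Password, RFC FROM Usuarios "
        "WHERE Username = ? AND Password = ?",
        (username, password),
    )
    return cur.fetchall()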
|
mit
| -3,781,641,314,589,348,400
| 33.15
| 175
| 0.571777
| false
| 4.366738
| false
| false
| false
|
kartvep/Combaine
|
combaine/plugins/DistributedStorage/MongoReplicaSet.py
|
1
|
2716
|
from __AbstractStorage import AbstractDistributedStorage
import pymongo
import hashlib
import time
class MongoReplicaSet(AbstractDistributedStorage):
def __init__(self, **config):
self.hosts = config['hosts']
self.rs = None
self.db = None
self.coll_name = None
self.cache_key_list = list()
def connect(self, namespace):
try:
self.rs = pymongo.Connection(self.hosts, fsync=True)
db, collection = namespace.split('/')
self.coll_name = collection
self.db = self.rs[db]
if collection in self.db.collection_names():
if not self.db[collection].options().get("capped"):
self.db.drop_collection(collection)
self.db.create_collection(collection, capped=True, size=500000000, max=2000)
else:
self.db.create_collection(collection, capped=True, size=500000000, max=2000)
self.db_cursor = self.db[collection]
self.db_cursor.ensure_index("_id")
except Exception, err:
print str(err)
return False
else:
return True
def close(self):
try:
self.rs.close()
except Exception, err:
print err
return False
else:
return True
def insert(self, key, data):
try:
print key
_id = hashlib.md5(key).hexdigest()
print data
value = {"_id" : _id, "key" : key, "value" : data, "time" : int(time.time()) }
#print self.db_cursor.insert(value, continue_on_error=True, w=0, manipulate=False)
print self.db_cursor.save(value, continue_on_error=True, w=1, manipulate=False)
except Exception, err:
print str(err)
return False
else:
return True
def read(self, key, cache=False):
try:
_id = hashlib.md5(key).hexdigest()
ret = self.db_cursor.find_one({"_id" : _id }, fields={"key" : False, "_id" : False, "time" : False})
if ret is not None:
if cache:
self.cache_key_list.append(key)
return ret["value"]
else:
return []
except Exception as err:
print str(err)
return []
def remove(self, key):
try:
return "OK" #for capped
_id = hashlib.md5(key).hexdigest()
return str(self.db_cursor.remove(_id, w=1))
except Exception as err:
print str(err)
return False
else:
return True
PLUGIN_CLASS = MongoReplicaSet
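# Illustrative usage sketch (not part of the plugin): how a caller might drive
# this storage class. The host list and the "combaine/cache" namespace are
# assumptions for the demo; connect() expects a "<database>/<collection>" string.
def _demo_usage():
    storage = MongoReplicaSet(hosts=["localhost:27017"])
    if storage.connect("combaine/cache"):
        storage.insert("some-key", {"answer": 42})
        data = storage.read("some-key")
        storage.close()
        return data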
|
lgpl-3.0
| 6,870,602,319,103,538,000
| 31.333333
| 112
| 0.529823
| false
| 4.159265
| false
| false
| false
|
claymation/django-zendesk
|
djzendesk/tests.py
|
1
|
3695
|
import base64
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
def http_basic_auth_string(username, password):
credentials = base64.encodestring('%s:%s' % (username, password)).strip()
auth_string = 'Basic %s' % credentials
return auth_string
@mock.patch.object(settings, 'ZENDESK_CALLBACK_USERNAME', 'foo')
@mock.patch.object(settings, 'ZENDESK_CALLBACK_PASSWORD', 'bar')
@mock.patch('djzendesk.views.target_callback_received')
class DjangoZendeskTestCase(TestCase):
def test_view_requires_post(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
# Test GET
response = self.client.get(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test PUT
response = self.client.put(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test DELETE
response = self.client.delete(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test HEAD
response = self.client.head(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test POST
response = self.client.post(url, {'message': 'Hello, world!'})
self.assertNotEqual(response.status_code, 405)
def test_view_requires_authentication(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
# Test no credentials
response = self.client.post(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 403)
# Test wrong credentials
auth_string = http_basic_auth_string(username='foo', password='bad')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertEqual(response.status_code, 403)
# Test correct credentials
auth_string = http_basic_auth_string(username='foo', password='bar')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertNotEqual(response.status_code, 403)
def test_view_requires_message(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
auth_string = http_basic_auth_string(username='foo', password='bar')
# Test without message
response = self.client.post(url, {'blah': 'blah'}, HTTP_AUTHORIZATION=auth_string)
self.assertEqual(response.status_code, 400)
# Test with message
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertNotEqual(response.status_code, 400)
def test_view_ok(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
auth_string = http_basic_auth_string(username='foo', password='bar')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertContains(response, 'OK')
def test_view_sends_signal(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
auth_string = http_basic_auth_string(username='foo', password='bar')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
mock_target_callback_received.send.assert_called_once_with(
sender=None,
ticket_id='123',
message='Hello, world!',
)
|
bsd-3-clause
| -4,604,251,067,905,810,000
| 42.470588
| 102
| 0.658457
| false
| 3.793634
| true
| false
| false
|
taigaio/taiga-back
|
taiga/users/migrations/0001_initial.py
|
1
|
3199
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import re
import django.core.validators
import taiga.users.models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0001_initial"),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(max_length=30, help_text='Required. 30 characters or fewer. Letters, numbers and /./-/_ characters', verbose_name='username', unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\w.-]+$', 32), 'Enter a valid username.', 'invalid')])),
('email', models.EmailField(max_length=75, blank=True, verbose_name='email address')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('full_name', models.CharField(max_length=256, blank=True, verbose_name='full name')),
('color', models.CharField(default=taiga.users.models.generate_random_hex_color, max_length=9, blank=True, verbose_name='color')),
('bio', models.TextField(default='', blank=True, verbose_name='biography')),
('photo', models.FileField(null=True, max_length=500, blank=True, verbose_name='photo', upload_to='users/photo')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('default_language', models.CharField(default='', max_length=20, blank=True, verbose_name='default language')),
('default_timezone', models.CharField(default='', max_length=20, blank=True, verbose_name='default timezone')),
('colorize_tags', models.BooleanField(default=False, verbose_name='colorize tags')),
('token', models.CharField(default=None, max_length=200, blank=True, verbose_name='token', null=True)),
('email_token', models.CharField(default=None, max_length=200, blank=True, verbose_name='email token', null=True)),
('new_email', models.EmailField(null=True, max_length=75, blank=True, verbose_name='new email address')),
('github_id', models.IntegerField(null=True, blank=True, verbose_name='github ID')),
],
options={
'verbose_name_plural': 'users',
'verbose_name': 'user',
'ordering': ['username'],
},
bases=(models.Model,),
),
]
|
agpl-3.0
| 7,655,461,915,151,665,000
| 65.645833
| 306
| 0.632385
| false
| 4.220317
| false
| false
| false
|
tiffanyj41/hermes
|
src/utils/lastfm_etl/lastfm.py
|
3
|
8114
|
#!/usr/bin/env python
"""Translate the Last.fm data files to JSON.
This script takes the various Last.fm data files and write them out as
JSON. It removes the Last.fm artist URLs.
Attributes:
ARTISTS (dict): A dictionary that stores information about the artists. The
variables are as follows:
- artist_id (int): A unique identifier for each artist.
- name (str): The name of the artist.
FRIENDS (dict): A dictionary that stores information about the friends
graph. The variables are as follows:
- user_id (int): A unique identifier for each user.
- friend_user_id (int): A unique identifier of a user on the
friends list.
TAGS (dict): A dictionary that stores information about the tags. The
variables are as follows:
- tag_id (int): A unique identifier for each tag.
        - name (str): The name of the tag.
PLAYS (dict): A dictionary that stores information about the number of
plays by each user. The variables are as follows:
- user_id (int): A unique identifier for each user.
- artist_id (int): A unique identifier for each artist.
- plays (int): The number of plays by the user of the artist.
APPLIED_TAGS (dict): A dictionary that stores information about the tags
various users applied to various artists. The variables are as follows:
- user_id (int): A unique identifier for each user.
- artist_id (int): A unique identifier for each artist.
- tag_id (int): A unique identifier for each tag.
- day (int): The day the tag was added.
- month (int): The month the tag was added.
- year (int): The year the tag was added.
"""
from copy import deepcopy
import json
import csv
# JSON objects
ARTISTS = {
"artist_id": None,
"name": None,
}
FRIENDS = {
"user_id": None,
"friend_user_id": None,
}
TAGS = {
"tag_id": None,
"name": None,
}
PLAYS = {
"user_id": None,
"artist_id": None,
"plays": None,
}
APPLIED_TAGS = {
"user_id": None,
"artist_id": None,
"tag_id": None,
"day": None,
"month": None,
"year": None,
}
def convert_str(string):
"""Convert a string from 'iso-8859-1' to 'utf8'."""
return string.decode('iso-8859-1').encode('utf8')
def iter_lines(open_file):
"""Open the Last.fm CSVs and return an iterator over the lines.
Args:
open_file: A file handle object from open().
    Returns:
iterator: An iterator over each line in the file. Each line is a list,
with string elements for each column value.
"""
reader = csv.reader(
open_file,
delimiter='\t',
)
next(reader) # Skip the header
return reader
def parse_artist_line(line):
"""Parse a line from the Artist CSV file.
A line is a list of strings as follows:
line = [
artist_id,
name,
band_url,
band_photo_url,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "artist_id" and
"name".
"""
(artist_id, name, _, _) = line
current_artist = deepcopy(ARTISTS)
current_artist["artist_id"] = int(artist_id)
current_artist["name"] = name
return current_artist
def parse_friends_line(line):
"""Parse a line from the Friends CSV file.
A line is a list of strings as follows:
line = [
user_id,
user_id_of_friend,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id" and "friend_user_id".
"""
(user_id, friend_id) = line
current_friend = deepcopy(FRIENDS)
current_friend["user_id"] = int(user_id)
current_friend["friend_user_id"] = int(friend_id)
return current_friend
def parse_tag_line(line):
"""Parse a line from the Tag CSV file.
A line is a list of strings as follows:
line = [
tag_id,
tag,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "tag_id" and "tag".
"""
(tag_id, tag) = line
current_tag = deepcopy(TAGS)
current_tag["tag_id"] = int(tag_id)
current_tag["name"] = convert_str(tag)
return current_tag
def parse_applied_tag_line(line):
"""Parse a line from the Applied Tags CSV file.
A line is a list of strings as follows:
line = [
user_id,
artist_id,
tag_id,
day,
month,
year,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "artist_id",
"tag_id", "day", "month", and "year".
"""
(user_id, artist_id, tag_id, day, month, year) = line
current_tag = deepcopy(APPLIED_TAGS)
current_tag["user_id"] = int(user_id)
current_tag["artist_id"] = int(artist_id)
current_tag["tag_id"] = int(tag_id)
current_tag["day"] = int(day)
current_tag["month"] = int(month)
current_tag["year"] = int(year)
return current_tag
def parse_plays_line(line):
"""Parse a line from the Played Artists CSV file.
A line is a list of strings as follows:
line = [
user_id,
artist_id,
play_count,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "artist_id", and
"plays".
"""
(user_id, artist_id, plays) = line
current_plays = deepcopy(PLAYS)
current_plays["user_id"] = int(user_id)
current_plays["artist_id"] = int(artist_id)
current_plays["plays"] = int(plays)
return current_plays
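# Illustrative only: a made-up CSV row run through parse_artist_line(). The id,
# name and URLs are invented; real rows come from artists.dat via iter_lines().
def _demo_parse_artist_line():
    row = ["42", "Radiohead",
           "http://www.last.fm/music/Radiohead",
           "http://userserve-ak.last.fm/serve/252/example.jpg"]
    record = parse_artist_line(row)
    # record == {"artist_id": 42, "name": "Radiohead"}
    return record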
if __name__ == "__main__":
import argparse
# Set up command line flag handling
parser = argparse.ArgumentParser(
description="Transform the Last.FM datasets to JSON",
)
parser.add_argument(
'artists',
type=str,
help="the file containing the artists, normally 'artists.dat'",
)
parser.add_argument(
'tags',
type=str,
help="the file containing the tags, normally 'tags.dat'",
)
parser.add_argument(
'friends',
type=str,
help="the file containing the friends graph, normally 'user_friends.dat'",
)
parser.add_argument(
'applied_tags',
type=str,
help="the file containing the applied tags, normally 'user_taggedartists.dat'",
)
parser.add_argument(
'plays',
type=str,
help="the file containing the play counts, normally 'user_artists.dat'",
)
parser.add_argument(
'-o',
'--output-directory',
type=str,
action="store",
help="the directory to save the output JSON files, by default the current directory",
default="./",
)
args = parser.parse_args()
# Parse the files
processing_queue = (
(args.artists, args.output_directory + "/lastfm_artists.json", parse_artist_line),
(args.tags, args.output_directory + "/lastfm_tags.json", parse_tag_line),
(args.friends, args.output_directory + "/lastfm_friends.json", parse_friends_line),
(args.applied_tags, args.output_directory + "/lastfm_applied_tags.json", parse_applied_tag_line),
(args.plays, args.output_directory + "/lastfm_plays.json", parse_plays_line),
)
for input_file, output_file, function in processing_queue:
with open(input_file, 'rb') as csv_file, open(output_file, 'w') as json_file:
for row in iter_lines(csv_file):
json_file.write(json.dumps(function(row)) + '\n')
|
apache-2.0
| 1,649,158,540,482,051,300
| 26.137124
| 105
| 0.579123
| false
| 3.834594
| false
| false
| false
|
manyunkai/tweixin
|
weixin/docking/utils/menu.py
|
1
|
8631
|
# -*-coding:utf-8 -*-
"""
Created on 11/26/2015
@author: Danny<manyunkai@hotmail.com>
DannyWork Project.
"""
import json
import tornado.gen
from .request import request_async
from .token import get_access_token
from exception.request import GetSelfMenuFailed
from docking.models.material import Material, NewsMessageItem, NewsMessage, NewsMessageItemsMapping, MaterialNewsMapping
from docking.models.menu import Menu
from docking.models.event import EventRule
from docking.utils.generic import generate_random_key
SELFMENU_INFO_URL = 'https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info'
@tornado.gen.coroutine
def set_menu_with_materials(account, agent, buttons, parent=0):
for data in buttons:
btype = data.get('type')
menu_params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'parent_id': parent,
'name': data['name'],
}
if btype:
if btype == 'view':
menu_params.update({
'type': 'view',
'url': data['url']
})
elif btype in ['click', 'scancode_push', 'scancode_waitmsg', 'pic_sysphoto',
'pic_photo_or_album', 'pic_weixin', 'location_select']:
menu_params.update({
'type': btype,
'key': data['key'][:16]
})
elif btype in ['media_id', 'view_limited']:
menu_params.update({
'type': btype,
'media_id': data['media_id']
})
elif btype == 'text':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'T',
'description': data['value']
}
material = yield Material(alias='文本消息', title='文本消息', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'img':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'P',
'media_id': data['value']
}
material = yield Material(alias='远程图片', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'voice':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'V',
'media_id': data['value']
}
material = yield Material(alias='远程语音', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'video':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'F',
'fltype': 'R',
'file': data['value']
}
material = yield Material(alias='远程视频', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'news':
news = yield NewsMessage(account_id=account.id,
account_agent_id=agent.id if agent else 0,
alias='图文消息').get_or_create()
ordering = 1
for item in data['news_info']['list']:
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'title': item['title'][:16],
'description': item['digest'],
'pltype': 'R',
'pic_large': item['cover_url'],
'pic_small': item['cover_url'],
'url': item['content_url']
}
item = yield NewsMessageItem(**params).get_or_create()
yield NewsMessageItemsMapping(news_id=news.id, item_id=item.id, ordering=ordering).get_or_create(news_id=news.id, item_id=item.id)
ordering += 1
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'N',
}
material = yield Material(alias='图文消息', **params).get_or_create(**params)
params = {
'material_id': material.id,
'news_id': news.id,
}
yield MaterialNewsMapping(**params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
print('menu_params:', menu_params)
yield Menu(**menu_params).get_or_create(**menu_params)
else:
menu_params['type'] = 'parent'
menu = yield Menu(**menu_params).get_or_create(**menu_params)
yield set_menu_with_materials(account, agent, data['sub_button']['list'], parent=menu.id)
@tornado.gen.coroutine
def init_selfmenu_info(account, agent=None):
"""
Pull the current selfmenu.
Please note that this method will cover the current menus.
:param account: docking.models.Account instance.
:param agent: docking.models.Agent instance.
:return: True if succeed.
"""
access_token = yield get_access_token(account)
params = {
'access_token': access_token
}
response = yield request_async(SELFMENU_INFO_URL, params)
try:
data = json.loads(response.decode('utf8'))
except AttributeError:
raise GetSelfMenuFailed('Error in decoding response data.')
except (TypeError, ValueError) as e:
raise GetSelfMenuFailed('Error in parsing response data: {0}.'.format(e))
# Clear existed menus.
query = {
'account_id': account.id
}
if agent:
query['account_agent_id'] = agent.id
yield Menu(**query).delete()
# Parse the new menus.
try:
yield set_menu_with_materials(account, agent, data['selfmenu_info']['button'])
except Exception as e:
raise GetSelfMenuFailed('Error in parsing response data: {0}.'.format(str(e)))
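# Illustrative only: the rough shape of the "selfmenu_info" payload that
# init_selfmenu_info() passes to set_menu_with_materials(). The menu names,
# URL, key and text value below are invented placeholders, not real menu data.
EXAMPLE_SELFMENU_INFO = {
    "selfmenu_info": {
        "button": [
            {"type": "view", "name": "Homepage", "url": "https://example.com/"},
            {"name": "More", "sub_button": {"list": [
                {"type": "click", "name": "Hello", "key": "HELLO_KEY"},
                {"type": "text", "name": "About", "value": "plain text reply"},
            ]}},
        ]
    }
}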
|
gpl-2.0
| 6,789,176,383,028,225,000
| 35.645299
| 150
| 0.464723
| false
| 4.33738
| false
| false
| false
|
Hobsons/hippo
|
data_sources/redis_queue.py
|
1
|
1363
|
import redis
from data_sources.hippo_base import HippoDataSource
class RedisQueue(HippoDataSource):
namespace = 'redis'
label = 'Redis Queue'
inputs = {
'host': {'input':'text','label':'Redis Host'},
'port': {'input':'number','label':'Redis Port','default':6379},
'db' : {'input':'number','label':'Redis DB','default':0},
'name': {'input':'text','label':'Redis Queue Key Name'}
}
def __init__(self, *args):
super().__init__(*args, namespace=RedisQueue.namespace, inputs=RedisQueue.inputs)
def process(self):
if not self.name or not self.host:
return
redis_client = redis.StrictRedis(host=self.host, port=int(self.port), db=int(self.db))
count = 0
list_name = self.name
limbo_list_name = 'hippo:queue:' + list_name + '_limbo'
limbo_items = redis_client.lrange(limbo_list_name,0,-1)
if limbo_items:
count = len(limbo_items)
self.create_tasks(limbo_items)
items = []
while count < self.new_task_limit:
i = redis_client.rpoplpush(list_name,limbo_list_name)
if i:
items.append(i)
count += 1
else:
break
if items:
self.create_tasks(items)
redis_client.delete(limbo_list_name)
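# Illustrative producer sketch (not part of the data source): process() above
# drains the configured list with RPOPLPUSH, parking items in a
# "hippo:queue:<name>_limbo" list until create_tasks() has handled them, so a
# producer only needs to LPUSH task payloads onto the main list. The host, key
# name and payload below are assumptions for the demo.
def _demo_enqueue(host="localhost", port=6379, db=0, name="mytasks"):
    client = redis.StrictRedis(host=host, port=port, db=db)
    client.lpush(name, '{"cmd": "echo hello"}')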
|
apache-2.0
| 3,635,951,184,378,449,400
| 30
| 94
| 0.557594
| false
| 3.625
| false
| false
| false
|
acrazing/dbapi
|
dbapi/config.py
|
1
|
1038
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017 The Authors. All Rights Reserved.
# License: MIT.
# Author: acrazing <joking.young@gmail.com>.
# File: config.
"""
Default configuration.
"""
import logging
import os
import sys
api_config = {
'persist_file': os.path.join(os.path.expanduser("~"), ".__cache__dbapi.json"),
'headers': {
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh,zh-CN;q=0.8,zh-TW;q=0.6,en;q=0.4,en-US;q=0.2',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/538.36 (KHTML, like Gecko) '
'Chrome/57.0.3029.110 Safari/538.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Referer': 'https://www.douban.com/people/junbaoyang/',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
},
'logger': 'dbapi',
'log_level': logging.DEBUG,
'log_destination': sys.stderr,
'timeout': 5.0,
}
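# Illustrative only (not part of dbapi): one way a caller might consume this
# dictionary -- wiring up the logger it describes. All values used here come
# straight from api_config above.
def _demo_configure_logger():
    logger = logging.getLogger(api_config['logger'])
    logger.setLevel(api_config['log_level'])
    logger.addHandler(logging.StreamHandler(api_config['log_destination']))
    return logger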
|
mit
| -682,045,576,200,020,600
| 31.1875
| 111
| 0.596117
| false
| 2.746667
| false
| true
| false
|